Docker Compose Configuration

The monolithic docker-compose.yaml that runs all services on ubuntu-docker.

# x-logging anchor "default-logging" - caps log size so container log files can't grow unbounded
#   A safety net in case debug logging is left enabled on a service
x-logging: &default-logging
  driver: "json-file"
  options:
    max-size: "10m"
    max-file: "3"

services:

# Internet Connectivity

  traefik:
    image: traefik:latest
    container_name: traefik
    logging: *default-logging
    restart: unless-stopped
    security_opt:
      - no-new-privileges:true
    networks:
      - proxy
    ports:
      - "192.168.1.3:80:80"
      - "192.168.1.3:443:443"
    environment:
      - TZ=${TZ}
      - CF_DNS_API_TOKEN=${CF_DNS_API_TOKEN}
      - TRAEFIK_AUTH=${TRAEFIK_AUTH}
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - ${APPDATA}/traefik/traefik.yml:/traefik.yml:ro
      - ${APPDATA}/traefik/dynamic:/dynamic:ro
      - ${APPDATA}/traefik/acme.json:/acme.json
    command:
      - "--configFile=/traefik.yml"

  tunnel:
    image: cloudflare/cloudflared:latest
    container_name: cloudflared-tunnel
    logging: *default-logging
    restart: unless-stopped
    command: tunnel run
    environment:
      - TUNNEL_TOKEN=${CLOUDFLARE_TUNNEL_TOKEN}
    networks:
      - proxy
  
  tailscale:
    image: tailscale/tailscale:latest
    container_name: tailscale
    logging: *default-logging
    hostname: 819-ubuntu-docker
    networks:
      macvlan:
        ipv4_address: 192.168.1.8
    cap_add:
      - NET_ADMIN
      - SYS_MODULE
    volumes:
      - ${APPDATA}/tailscale:/var/lib/tailscale
      - /dev/net/tun:/dev/net/tun
    environment:
      - TS_AUTHKEY=${TS_AUTHKEY}
      - TS_STATE_DIR=/var/lib/tailscale
      - TS_USERSPACE=false
      - TS_EXTRA_ARGS=--advertise-routes=192.168.1.5/32 --accept-routes
    restart: unless-stopped
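
  # NOTE: TS_USERSPACE=false requires the /dev/net/tun mount and NET_ADMIN
  # granted above, and the advertised 192.168.1.5/32 route (Plex) must still
  # be approved in the Tailscale admin console. To verify from the host:
  #
  #   docker exec tailscale tailscale status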

  authentik-redis:
    image: docker.io/library/redis
    container_name: authentik-redis
    logging: *default-logging
    command: --save 60 1 --loglevel warning
    restart: unless-stopped
    networks:
      - internal
    volumes:
      - ${APPDATA}/authentik/redis:/data

  authentik-server:
    image: ghcr.io/goauthentik/server:${AUTHENTIK_TAG}
    container_name: authentik-server
    logging: *default-logging
    restart: unless-stopped
    command: server
    networks:
      - proxy
      - internal
    environment:
      AUTHENTIK_REDIS__HOST: authentik-redis
      AUTHENTIK_POSTGRESQL__HOST: postgres
      AUTHENTIK_POSTGRESQL__NAME: ${AUTHENTIK_PG_DB}
      AUTHENTIK_POSTGRESQL__USER: ${AUTHENTIK_PG_USER}
      AUTHENTIK_POSTGRESQL__PASSWORD: ${AUTHENTIK_PG_PASS}
      AUTHENTIK_SECRET_KEY: ${AUTHENTIK_SECRET_KEY}
      # NOTE: Do not define TZ (timezone)
    volumes:
      - ${APPDATA}/authentik/media:/media
      - ${APPDATA}/authentik/custom-templates:/templates
      # NOTE: Do NOT mount /etc/localtime or /etc/timezone here. Authentik needs UTC.
    depends_on:
      postgres:
        condition: service_healthy
      authentik-redis:
        condition: service_started

  authentik-worker:
    image: ghcr.io/goauthentik/server:${AUTHENTIK_TAG}
    container_name: authentik-worker
    logging: *default-logging
    restart: unless-stopped
    command: worker
    networks:
      - internal
    environment:
      AUTHENTIK_REDIS__HOST: authentik-redis
      AUTHENTIK_POSTGRESQL__HOST: postgres
      AUTHENTIK_POSTGRESQL__NAME: ${AUTHENTIK_PG_DB}
      AUTHENTIK_POSTGRESQL__USER: ${AUTHENTIK_PG_USER}
      AUTHENTIK_POSTGRESQL__PASSWORD: ${AUTHENTIK_PG_PASS}
      AUTHENTIK_SECRET_KEY: ${AUTHENTIK_SECRET_KEY}
    user: root
    volumes:
      - ${APPDATA}/authentik/media:/media
      - ${APPDATA}/authentik/certs:/certs
      - ${APPDATA}/authentik/custom-templates:/templates
    depends_on:
      postgres:
        condition: service_healthy
      authentik-redis:
        condition: service_started
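
  # Routers can be protected with authentik's embedded outpost via Traefik's
  # forwardAuth. A sketch of a /dynamic file-provider middleware, assuming the
  # default outpost path on the server container (middleware name is made up):
  #
  #   http:
  #     middlewares:
  #       authentik:
  #         forwardAuth:
  #           address: http://authentik-server:9000/outpost.goauthentik.io/auth/traefik
  #           trustForwardHeader: true
  #           authResponseHeaders:
  #             - X-authentik-username
  #             - X-authentik-groups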

  pihole:
    image: pihole/pihole:latest
    container_name: pihole
    logging: *default-logging
    hostname: pihole
    restart: unless-stopped
    dns: # Resolvers for the container's own lookups (needed at startup, before FTL is up)
      - 1.1.1.1
      - 8.8.8.8
    cap_add:
      - NET_ADMIN
      - SYS_TIME
      - SYS_NICE
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=${TZ}
      - FTLCONF_dns_listeningMode=ALL
      - FTLCONF_webserver_api_password=${PIHOLE_PASSWORD}
      - FTLCONF_dns_reply_host_force4=true
      - FTLCONF_dns_reply_host_IPv4=192.168.1.4
      - "FTLCONF_misc_dnsmasq_lines=address=/${FQDN}/192.168.1.3;server=/${FQDN}/"
      - WEBTHEME=default-dark
      - IPv6=false
    ports:
      - "192.168.1.4:53:53/tcp"
      - "192.168.1.4:53:53/udp"
      - "192.168.1.4:80:80/tcp"
    volumes:
      - '${APPDATA}/pihole/etc-pihole/:/etc/pihole/'
      - '${APPDATA}/pihole/etc-dnsmasq.d/:/etc/dnsmasq.d/'
    networks:
      - proxy
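
  # The FTLCONF_misc_dnsmasq_lines entry gives split-horizon DNS:
  # address=/${FQDN}/192.168.1.3 answers every name under ${FQDN} with the
  # Traefik IP, and the trailing server=/${FQDN}/ stops those queries from
  # being forwarded to upstream resolvers.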

  postgres:
    image: docker.io/library/postgres:16
    container_name: postgres
    logging: *default-logging
    restart: unless-stopped
    networks:
      - internal
    environment:
      # Main Credentials
      POSTGRES_USER: ${POSTGRES_SUPER_USER}
      POSTGRES_PASSWORD: ${POSTGRES_SUPER_PASS}
      # Add any app passwords below so the init script can use them
      AUTHENTIK_PG_PASS: ${AUTHENTIK_PG_PASS}
    volumes:
      - ${APPDATA}/postgres/data:/var/lib/postgresql/data
      - ${APPDATA}/postgres/init:/docker-entrypoint-initdb.d:ro
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER}"]
      interval: 10s
      timeout: 5s
      retries: 5
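
  # Scripts in the init mount run once, on first start with an empty data dir.
  # A sketch of what ${APPDATA}/postgres/init could hold to provision authentik
  # (filename and identifiers are illustrative; it reads the AUTHENTIK_PG_PASS
  # passed in above):
  #
  #   #!/bin/bash
  #   # 01-authentik.sh
  #   set -e
  #   psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" <<EOSQL
  #   CREATE USER authentik WITH PASSWORD '$AUTHENTIK_PG_PASS';
  #   CREATE DATABASE authentik OWNER authentik;
  #   EOSQL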

# arr suite

  bazarr:
    image: lscr.io/linuxserver/bazarr:latest
    container_name: bazarr
    logging: *default-logging
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=${TZ}
    volumes:
      - '${APPDATA}/bazarr:/config'
      - '${MEDIA}/plex/movies:/movies'
      - '${MEDIA}/plex/tv:/tv'
    restart: unless-stopped
    networks:
      - proxy
      - internal

  lidarr:
    image: lscr.io/linuxserver/lidarr:latest
    container_name: lidarr
    logging: *default-logging
    hostname: lidarr
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=${TZ}
    volumes:
      - '${APPDATA}/lidarr:/config'
      - '${MEDIA}/plex/music:/music'
      - '${MEDIA}/downloads:/downloads'
    restart: unless-stopped
    networks:
      - proxy
      - internal

  prowlarr:
    image: lscr.io/linuxserver/prowlarr:latest
    container_name: prowlarr
    logging: *default-logging
    hostname: prowlarr
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=${TZ}
    volumes:
      - '${APPDATA}/prowlarr:/config'
    restart: unless-stopped
    networks:
      - proxy
      - internal

  radarr:
    image: lscr.io/linuxserver/radarr:latest
    container_name: radarr
    logging: *default-logging
    hostname: radarr
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=${TZ}
    volumes:
      - '${APPDATA}/radarr:/config'
      - '${MEDIA}/plex/movies:/movies'
      - '${MEDIA}/downloads:/downloads'
    restart: unless-stopped
    networks:
      - proxy
      - internal

  sonarr:
    image: lscr.io/linuxserver/sonarr:latest
    container_name: sonarr
    logging: *default-logging
    hostname: sonarr
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=${TZ}
    volumes:
      - '${APPDATA}/sonarr:/config'
      - '${MEDIA}/plex/tv:/tv'
      - '${MEDIA}/downloads:/downloads'
    restart: unless-stopped
    networks:
      - proxy
      - internal
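
  # None of the arr services carry Traefik labels, so routing presumably lives
  # in the file provider's /dynamic directory. A sketch of one router/service
  # pair (hostname, entrypoint, and resolver names are assumptions):
  #
  #   http:
  #     routers:
  #       sonarr:
  #         rule: Host(`sonarr.example.com`)
  #         entryPoints: [websecure]
  #         tls:
  #           certResolver: cloudflare
  #         service: sonarr
  #     services:
  #       sonarr:
  #         loadBalancer:
  #           servers:
  #             - url: http://sonarr:8989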

#  calibre-web:
#    image: lscr.io/linuxserver/calibre-web:latest
#    container_name: calibre-web
#    environment:
#      - PUID=1000
#      - PGID=1000
#      - TZ=${TZ}
#      - DOCKER_MODS=linuxserver/calibre-web:calibre
#      - OAUTHLIB_RELAX_TOKEN_SCOPE=1
#    volumes:
#      - ${APPDATA}/calibre:/config
#      - ${MEDIA}/books:/books
#    restart: unless-stopped
#    networks:
#      - proxy
#      - internal

# User Landing Pages

  glance:
    image: glanceapp/glance
    container_name: glance
    logging: *default-logging
    hostname: glance
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=${TZ}
    volumes:
      - '${APPDATA}/glance:/app/config'
    restart: unless-stopped
    networks:
      - proxy

  homepage:
    image: ghcr.io/gethomepage/homepage:latest
    container_name: homepage
    logging: *default-logging
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=${TZ}
      - HOMEPAGE_ALLOWED_HOSTS=homepage.${FQDN}
    volumes:
      - '${APPDATA}/homepage/config:/app/config'
      - '${APPDATA}/homepage/images:/app/public/images'
      - '${APPDATA}/homepage/icons:/app/public/icons'
      - '${MEDIA}:/storage:ro'
    restart: unless-stopped
    networks:
      - proxy

# Media Server

  plex:
    image: lscr.io/linuxserver/plex:latest
    container_name: plex
    logging: *default-logging
    hostname: 819-plex
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=${TZ}
      - VERSION=public
    volumes:
      - '${APPDATA}/plex:/config'
      - '${MEDIA}/plex/tv:/tv'
      - '${MEDIA}/plex/movies:/movies'
      - '${MEDIA}/plex/music:/music'
    restart: unless-stopped
    networks:
      macvlan:
        ipv4_address: 192.168.1.5
      proxy:

  plextraktsync-watch:
    image: ghcr.io/taxel/plextraktsync:latest
    container_name: plextraktsync-watch
    logging: *default-logging
    command: watch
    volumes:
      - '${APPDATA}/plextraktsync/config:/app/config'
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=${TZ}
    restart: unless-stopped
    networks:
      - proxy

  plextraktsync-sync:
    image: ghcr.io/taxel/plextraktsync:latest
    container_name: plextraktsync-sync
    logging: *default-logging
    command: sync
    volumes:
      - '${APPDATA}/plextraktsync/config:/app/config'
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=${TZ}
    restart: on-failure:2
    networks:
      - proxy

  ofelia-scheduler:
    image: mcuadros/ofelia:latest
    container_name: ofelia-scheduler
    logging: *default-logging
    depends_on:
      - plextraktsync-sync
    command: daemon --docker
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    restart: unless-stopped
    labels:
      ofelia.job-run.plextraktsync-daily.schedule: "0 0 3 * * *"  # Daily at 03:00 (Ofelia cron has a leading seconds field)
      ofelia.job-run.plextraktsync-daily.container: "plextraktsync-sync"

  seerr:
    image: ghcr.io/fallenbagel/jellyseerr:latest
    container_name: seerr
    logging: *default-logging
    hostname: seerr
    init: true
    volumes:
      - '${APPDATA}/seerr/config:/app/config'
    environment:
      - PUID=1000
      - PGID=1000
      - LOG_LEVEL=debug
      - TZ=${TZ}
      - PORT=5055
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost:5055/api/v1/status || exit 1
      start_period: 20s
      timeout: 3s
      interval: 15s
      retries: 3
    restart: unless-stopped
    networks:
      - proxy

  tautulli:
    image: lscr.io/linuxserver/tautulli:latest
    container_name: tautulli
    logging: *default-logging
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=${TZ}
    volumes:
      - '${APPDATA}/tautulli:/config'
    restart: unless-stopped
    networks:
      - proxy

# File Sharing

  qbittorrent:
    image: lscr.io/linuxserver/qbittorrent:latest
    container_name: qbittorrent
    logging: *default-logging
    hostname: qbittorrent
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=${TZ}
      - WEBUI_PORT=8090
    volumes:
      - '${APPDATA}/qbittorrent:/config'
      - '${MEDIA}/downloads:/downloads'
    restart: unless-stopped
    networks:
      - proxy
      - internal

  sabnzbd:
    image: lscr.io/linuxserver/sabnzbd:latest
    container_name: sabnzbd
    logging: *default-logging
    hostname: sabnzbd
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=${TZ}
    volumes:
      - '${APPDATA}/sabnzbd:/config'
      - '${MEDIA}/downloads:/downloads'
    restart: unless-stopped
    networks:
      - proxy
      - internal

  unpackerr:
    image: golift/unpackerr:latest
    container_name: "unpackerr"
    logging: *default-logging
    volumes:
      - '${APPDATA}/unpackerr:/data/logs'
      - '${MEDIA}/downloads:/data/downloads'
    restart: unless-stopped
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=${TZ}
      - UN_DEBUG=false
      - UN_LOG_FILE=/data/logs/log
      - UN_LOG_FILES=10
      - UN_LOG_FILE_MB=10
      - UN_INTERVAL=2m
      - UN_START_DELAY=1m
      - UN_RETRY_DELAY=5m
      - UN_MAX_RETRIES=3
      - UN_PARALLEL=1
      - UN_FILE_MODE=0644
      - UN_DIR_MODE=0755
      - UN_SONARR_0_URL=http://sonarr:8989
      - UN_SONARR_0_API_KEY=${SONARR_API_KEY} # keep API keys in .env with the other secrets
      - UN_SONARR_0_PATHS_0=/data/downloads/qbittorrent/sonarr
      - UN_SONARR_0_PROTOCOLS=torrent
      - UN_SONARR_0_TIMEOUT=10s
      - UN_SONARR_0_DELETE_ORIG=false
      - UN_SONARR_0_DELETE_DELAY=5m
      - UN_RADARR_0_URL=http://radarr:7878
      - UN_RADARR_0_API_KEY=${RADARR_API_KEY}
      - UN_RADARR_0_PATHS_0=/data/downloads/qbittorrent/radarr
      - UN_RADARR_0_PROTOCOLS=torrent
      - UN_RADARR_0_TIMEOUT=10s
      - UN_RADARR_0_DELETE_ORIG=false
      - UN_RADARR_0_DELETE_DELAY=5m
      - UN_LIDARR_0_URL=http://lidarr:8686
      - UN_LIDARR_0_API_KEY=${LIDARR_API_KEY}
      - UN_LIDARR_0_PATHS_0=/data/downloads/qbittorrent/lidarr
      - UN_LIDARR_0_PROTOCOLS=torrent
      - UN_LIDARR_0_TIMEOUT=10s
      - UN_LIDARR_0_DELETE_ORIG=false
      - UN_LIDARR_0_DELETE_DELAY=5m
    networks:
      - internal

# Chat/IRC Services

#  quassel-core:
#    image: lscr.io/linuxserver/quassel-core:latest
#    container_name: quassel-core
#    logging: *default-logging
#    environment:
#      - PUID=1000
#      - PGID=1000
#      - TZ=${TZ}
#      - RUN_OPTS=""
#      - DB_BACKEND=SQLite
#      - AUTH_AUTHENTICATOR=Database
#    volumes:
#      - '${APPDATA}/quassel-core:/config'
#    restart: unless-stopped
#    networks:
#      - internal
#    ports:
#      - "4242:4242"
#
#  quassel-web:
#    image: lscr.io/linuxserver/quassel-web:latest
#    container_name: quassel-web
#    logging: *default-logging
#    environment:
#      - PUID=1000
#      - PGID=1000
#      - TZ=${TZ}
#      - QUASSEL_CORE=quassel-core
#      - QUASSEL_PORT=4242
#      - URL_BASE=/quassel
#    volumes:
#      - '${APPDATA}/quassel-web:/config'
#    restart: unless-stopped
#    networks:
#      - proxy
#      - internal

  conduit:
    image: matrixconduit/matrix-conduit:latest
    container_name: conduit
    logging: *default-logging
    environment:
      CONDUIT_CONFIG: "/etc/conduit.toml"
      RUST_BACKTRACE: 1
    volumes:
      - ${APPDATA}/conduit/data:/var/lib/matrix-conduit
      - ${APPDATA}/conduit/conduit.toml:/etc/conduit.toml:ro
    restart: unless-stopped
    networks:
      - proxy
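
  # A minimal sketch of the mounted conduit.toml (server_name is a
  # placeholder; Conduit listens on 6167 by default, fronted here by Traefik):
  #
  #   [global]
  #   server_name = "example.com"
  #   database_backend = "rocksdb"
  #   database_path = "/var/lib/matrix-conduit"
  #   port = 6167
  #   address = "0.0.0.0"
  #   allow_registration = false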

  element-web:
    image: vectorim/element-web:latest
    container_name: element-web
    logging: *default-logging
    restart: unless-stopped
    volumes:
      - ${APPDATA}/element-web/element-config.json:/app/config.json
    networks:
      - proxy

# Network Services

  unifi-db:
    image: docker.io/mongo:7.0
    container_name: unifi-db
    logging: *default-logging
    restart: unless-stopped
    networks:
      - internal
    environment:
      - MONGO_INITDB_ROOT_USERNAME=${MONGO_INITDB_ROOT_USERNAME}
      - MONGO_INITDB_ROOT_PASSWORD=${MONGO_INITDB_ROOT_PASSWORD}
      - MONGO_USER=${MONGO_USER}
      - MONGO_PASS=${MONGO_PASS}
      - MONGO_DBNAME=${MONGO_DBNAME}
      - MONGO_AUTHSOURCE=${MONGO_AUTHSOURCE}
    volumes:
      - ${APPDATA}/unifi/db/data:/data/db
      - ${APPDATA}/unifi/db/init/init-mongo.sh:/docker-entrypoint-initdb.d/init-mongo.sh:ro
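
  # Mongo runs docker-entrypoint-initdb.d scripts only on a fresh /data/db.
  # A sketch of init-mongo.sh in the spirit of the linuxserver.io docs
  # (illustrative, not verbatim):
  #
  #   #!/bin/bash
  #   mongosh <<EOF
  #   use ${MONGO_AUTHSOURCE}
  #   db.auth("${MONGO_INITDB_ROOT_USERNAME}", "${MONGO_INITDB_ROOT_PASSWORD}")
  #   db.createUser({
  #     user: "${MONGO_USER}",
  #     pwd: "${MONGO_PASS}",
  #     roles: [
  #       { db: "${MONGO_DBNAME}", role: "dbOwner" },
  #       { db: "${MONGO_DBNAME}_stat", role: "dbOwner" }
  #     ]
  #   })
  #   EOF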

  unifi-network-application:
    image: lscr.io/linuxserver/unifi-network-application:latest
    container_name: unifi-network-application
    logging: *default-logging
    restart: unless-stopped
    depends_on:
      - unifi-db
    networks:
      macvlan:
        ipv4_address: 192.168.1.6
      internal:
      proxy:
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=${TZ}
      # Database Connection
      - MONGO_USER=${MONGO_USER}
      - MONGO_PASS=${MONGO_PASS}
      - MONGO_HOST=unifi-db
      - MONGO_PORT=27017
      - MONGO_DBNAME=${MONGO_DBNAME}
      - MONGO_AUTHSOURCE=${MONGO_AUTHSOURCE}
      # Performance
      - MEM_LIMIT=1024
      - MEM_STARTUP=1024
      - MOUNT_POINTS=/config
    volumes:
      - '${APPDATA}/unifi/app:/config'

  samba:
    image: ghcr.io/servercontainers/samba:latest
    container_name: samba
    logging: *default-logging
    hostname: storage
    restart: unless-stopped
    cap_add:
      - NET_ADMIN
    environment:
      - SAMBA_CONF_LOG_LEVEL=1
      - AVAHI_NAME=storage
      - SAMBA_CONF_WORKGROUP=WORKGROUP
      - SAMBA_CONF_SERVER_STRING=storage
      - MODEL=TimeCapsule
      - ACCOUNT_${SAMBA_USER1}=${SAMBA_PASS1}
      - UID_${SAMBA_USER1}=1000
      - SAMBA_GLOBAL_CONFIG_fruit:aapl=yes
      - SAMBA_GLOBAL_CONFIG_fruit:nfs_aces=no
      - |-
        SAMBA_VOLUME_CONFIG_storage=[Storage]
        path=/shares/storage
        valid users = ${SAMBA_USER1}
        guest ok = no
        read only = no
        browseable = yes
        create mask = 0664
        directory mask = 0775
        vfs objects = catia fruit streams_xattr
        fruit:resource = file
        fruit:metadata = stream
        fruit:encoding = native
      - |-
        SAMBA_VOLUME_CONFIG_cache=[Cache]
        path=/shares/cache
        valid users = ${SAMBA_USER1}
        guest ok = no
        read only = no
        browseable = yes
        create mask = 0664
        directory mask = 0775
        vfs objects = catia fruit streams_xattr
        fruit:resource = file
        fruit:metadata = stream
        fruit:encoding = native
    volumes:
      - ${MEDIA}:/shares/storage
      - /mnt/nvme:/shares/cache
    networks:
      macvlan:
        ipv4_address: 192.168.1.7

# Observability Core

  # VictoriaMetrics: Metrics Database
  victoriametrics:
    image: victoriametrics/victoria-metrics:latest
    container_name: victoriametrics
    logging: *default-logging
    user: "1000:1000"
    volumes:
      - ${APPDATA}/victoriametrics:/storage
      - ${APPDATA}/victoriametrics/prometheus.yml:/etc/prometheus/prometheus.yml
    command:
      - '-storageDataPath=/storage'
      - '-retentionPeriod=2y'
      - '-promscrape.config=/etc/prometheus/prometheus.yml'
      - '-httpListenAddr=:8428'
    restart: always
    networks:
      - internal
      - proxy
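
  # -promscrape.config makes VictoriaMetrics scrape targets itself. A sketch
  # of the mounted prometheus.yml covering the collectors below (exporter
  # default ports; the pve target is a placeholder):
  #
  #   scrape_configs:
  #     - job_name: unpoller
  #       static_configs: [{ targets: ["unpoller:9130"] }]
  #     - job_name: node
  #       static_configs: [{ targets: ["node-exporter-vm:9100"] }]
  #     - job_name: cadvisor
  #       static_configs: [{ targets: ["cadvisor:8080"] }]
  #     - job_name: pve
  #       metrics_path: /pve
  #       params:
  #         module: [default]
  #         target: [pve.example.com]
  #       static_configs: [{ targets: ["pve-exporter:9221"] }]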

  # Loki: Log Database
  loki:
    image: grafana/loki:latest
    container_name: loki
    logging: *default-logging
    user: "1000:1000"
    command: -config.file=/etc/loki/local-config.yaml
    volumes:
      - ${APPDATA}/loki/data:/loki
      - ${APPDATA}/loki/tmp:/tmp/loki
      - ${APPDATA}/loki/loki-config.yaml:/etc/loki/local-config.yaml
    restart: always
    networks:
      - internal

  # Grafana: The Dashboard
  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    logging: *default-logging
    user: "1000:1000"
    volumes:
      - ${APPDATA}/grafana:/var/lib/grafana
    restart: always
    networks:
      - internal
      - proxy

# Observability Collectors

  # Unpoller: Unifi Stats
  unpoller:
    image: ghcr.io/unpoller/unpoller:latest
    container_name: unpoller
    logging: *default-logging
    user: "1000:1000"
    environment:
      - UP_UNIFI_DEFAULT_URL=https://unifi-network-application:8443
      - UP_UNIFI_DEFAULT_USER=${UNIFI_RO_USER}
      - UP_UNIFI_DEFAULT_PASS=${UNIFI_RO_PASS}
      - UP_UNIFI_DEFAULT_SAVE_SITES=true
      - UP_INFLUXDB_DISABLE=true
      - UP_PROMETHEUS_HTTP_LISTEN=:9130
      - UP_UNIFI_DEFAULT_SSL_VERIFY=false
    restart: always
    networks:
      - internal

  # Node Exporter (VM): Monitors USB HDD & VM Resources
  node-exporter-vm:
    image: quay.io/prometheus/node-exporter:latest
    container_name: node-exporter-vm
    logging: *default-logging
    command:
      - '--path.rootfs=/host'
    pid: host
    restart: always
    volumes:
      - '/:/host:ro,rslave'
    networks:
      - internal

  # cAdvisor: Container Stats
  cadvisor:
    image: gcr.io/cadvisor/cadvisor:latest
    container_name: cadvisor
    logging: *default-logging
    privileged: true
    devices:
      - /dev/kmsg
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:ro
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
      - /dev/disk/:/dev/disk:ro
    restart: always
    networks:
      - internal

  # Promtail: Log Shipper
  promtail:
    image: grafana/promtail:latest
    container_name: promtail
    logging: *default-logging
    volumes:
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - ${APPDATA}/promtail/config.yml:/etc/promtail/config.yml
    command: -config.file=/etc/promtail/config.yml
    restart: always
    networks:
      - internal
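
  # A sketch of the mounted promtail config.yml: tail the json-file logs
  # mounted above and push to loki over the internal network (label names
  # are illustrative; loki listens on 3100 by default):
  #
  #   server:
  #     http_listen_port: 9080
  #   positions:
  #     filename: /tmp/positions.yaml
  #   clients:
  #     - url: http://loki:3100/loki/api/v1/push
  #   scrape_configs:
  #     - job_name: docker
  #       static_configs:
  #         - targets: [localhost]
  #           labels:
  #             job: docker
  #             __path__: /var/lib/docker/containers/*/*-json.log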

  # Proxmox Exporter
  pve-exporter:
    image: prompve/prometheus-pve-exporter:latest
    container_name: pve-exporter
    logging: *default-logging
    volumes:
      - ${APPDATA}/pve-exporter/pve.yml:/etc/prometheus/pve.yml
    restart: always
    networks:
      - proxy
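
  # A sketch of the mounted pve.yml for prometheus-pve-exporter (user and
  # token are placeholders; prefer an API token over the root password):
  #
  #   default:
  #     user: monitoring@pve
  #     token_name: exporter
  #     token_value: "00000000-0000-0000-0000-000000000000"
  #     verify_ssl: false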

  # Docker Socket Proxy
  dockerproxy:
    image: ghcr.io/tecnativa/docker-socket-proxy:latest
    container_name: dockerproxy
    logging: *default-logging
    environment:
      - CONTAINERS=1 # Allow access to viewing containers
      - SERVICES=1 # Allow access to viewing services
      - TASKS=1 # Allow access to viewing tasks
      - POST=0 # Disallow any POST operations
    ports:
      - "127.0.0.1:2375:2375"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    restart: unless-stopped
    networks:
      - proxy
  

networks:
  macvlan:
    driver: macvlan
    driver_opts:
      parent: eth0
    ipam:
      config:
        - subnet: 192.168.1.0/24
          gateway: 192.168.1.1
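
  # NOTE: A Docker host cannot reach its own macvlan containers directly by
  # design. Putting tailscale on the macvlan lets it reach Plex (192.168.1.5)
  # and advertise it to the tailnet, sidestepping that limitation.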

  proxy:
    driver: bridge
    driver_opts:
      com.docker.network.bridge.name: br-proxy
    ipam:
      config:
        - subnet: 172.20.0.0/16
          gateway: 172.20.0.1
  
  internal:
    driver: bridge
    internal: true
    driver_opts:
      com.docker.network.bridge.name: br-internal
    ipam:
      config:
        - subnet: 172.21.0.0/16
          gateway: 172.21.0.1