# ft_transcendence/docker-compose.yml
# captured: 2026-01-16 15:52:44 +01:00 (385 lines, 8.7 KiB, YAML)
# Two isolated bridge networks: "app" for application traffic,
# "monitoring" for the observability stack.
networks:
  app:
    driver: bridge
  monitoring:
    driver: bridge
services:
##########
# DEPS #
##########
pnpm_base:
build:
context: ./src
dockerfile: '@dockerfiles/pnpm.Dockerfile'
restart: on-failure:3
pnpm_deps:
build:
context: ./src
dockerfile: '@dockerfiles/deps.Dockerfile'
additional_contexts:
pnpm_base: "service:pnpm_base"
restart: on-failure:3
frontend:
build:
context: ./frontend
additional_contexts:
pnpm_base: "service:pnpm_base"
container_name: app-frontend
restart: on-failure:3
############
# SERVICES #
############
nginx:
build:
context: ./nginx
additional_contexts:
frontend: "service:frontend"
container_name: app-nginx
restart: always
networks:
- app
- monitoring
ports:
- '8888:443'
- '9090:8443'
volumes:
# if you need to share files with nginx, you do it here.
- icons-volume:/volumes/icons
env_file: .env
logging:
driver: gelf
options:
gelf-address: "udp://127.0.0.1:12201"
tag: "{{.Name}}"
auth:
build:
context: ./src/
args:
- SERVICE=auth
additional_contexts:
pnpm_base: "service:pnpm_base"
pnpm_deps: "service:pnpm_deps"
container_name: app-auth
restart: always
networks:
- app
volumes:
- sqlite-volume:/volumes/database
- icons-volume:/volumes/icons
- ./src/auth/config:/config
env_file: .env
logging:
driver: gelf
options:
gelf-address: "udp://127.0.0.1:12201"
tag: "{{.Name}}"
###############
# TIC-TAC-TOE #
###############
tic-tac-toe:
build:
context: ./src/
args:
- SERVICE=tic-tac-toe
additional_contexts:
pnpm_base: "service:pnpm_base"
pnpm_deps: "service:pnpm_deps"
container_name: app-tic-tac-toe
restart: always
networks:
- app
volumes:
- sqlite-volume:/volumes/database
env_file: .env
logging:
driver: gelf
options:
gelf-address: "udp://127.0.0.1:12201"
tag: "{{.Name}}"
###############
# ICONS #
###############
icons:
build:
context: ./src/
args:
- SERVICE=icons
additional_contexts:
pnpm_base: "service:pnpm_base"
pnpm_deps: "service:pnpm_deps"
container_name: app-icons
restart: always
networks:
- app
volumes:
- sqlite-volume:/volumes/database
- icons-volume:/volumes/icons
env_file: .env
logging:
driver: gelf
options:
gelf-address: "udp://127.0.0.1:12201"
tag: "{{.Name}}"
###############
# CHAT #
###############
chat:
build:
context: ./src/
args:
- SERVICE=chat
additional_contexts:
pnpm_base: "service:pnpm_base"
pnpm_deps: "service:pnpm_deps"
container_name: app-chat
restart: always
networks:
- app
env_file: .env
volumes:
- sqlite-volume:/volumes/database
logging:
driver: gelf
options:
gelf-address: "udp://127.0.0.1:12201"
tag: "{{.Name}}"
###############
# PONG #
###############
pong:
build:
context: ./src/
args:
- SERVICE=pong
additional_contexts:
pnpm_base: "service:pnpm_base"
pnpm_deps: "service:pnpm_deps"
container_name: app-pong
restart: always
networks:
- app
volumes:
- sqlite-volume:/volumes/database
env_file: .env
logging:
driver: gelf
options:
gelf-address: "udp://127.0.0.1:12201"
tag: "{{.Name}}"
###############
# USER #
###############
user:
build:
context: ./src/
args:
- SERVICE=user
additional_contexts:
pnpm_base: "service:pnpm_base"
pnpm_deps: "service:pnpm_deps"
container_name: app-user
restart: always
networks:
- app
volumes:
- sqlite-volume:/volumes/database
env_file: .env
logging:
driver: gelf
options:
gelf-address: "udp://127.0.0.1:12201"
tag: "{{.Name}}"
###############
# MONITORING #
###############
grafana:
container_name: mon-grafana
image: grafana/grafana-enterprise
restart: always
networks:
- app
- monitoring
depends_on:
- prometheus
volumes:
- ./monitoring/grafana/alerting:/etc/grafana/provisioning/alerting
- ./monitoring/grafana/datasources:/etc/grafana/provisioning/datasources
- ./monitoring/grafana/dashboards-config:/etc/grafana/provisioning/dashboards
- ./monitoring/grafana/dashboards:/var/lib/grafana/dashboards
- grafana-data:/var/lib/grafana
env_file: .env
environment:
- GF_LOG_LEVEL=warn
- GF_SERVER_ROOT_URL=https:/${NGINX_DOMAIN}/:9090/grafana/
- GF_SECURITY_ADMIN_USER=${GRAFANA_ADMIN_USER}
- GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASS}
logging:
driver: gelf
options:
gelf-address: "udp://127.0.0.1:12201"
tag: "{{.Name}}"
healthcheck:
test: ["CMD-SHELL", "curl -f -s http://localhost:3000/api/health || exit 1"]
interval: 30s
timeout: 10s
retries: 3
start_period: 30s
prometheus:
image: prom/prometheus:latest
container_name: mon-prometheus
networks:
- app
- monitoring
volumes:
- ./monitoring/prometheus:/etc/prometheus/
restart: unless-stopped
logging:
driver: gelf
options:
gelf-address: "udp://127.0.0.1:12201"
tag: "{{.Name}}"
env_file: .env
healthcheck:
test: ["CMD", "wget", "--quiet", "--spider", "http://localhost:9090/-/healthy"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
cadvisor:
image: gcr.io/cadvisor/cadvisor:latest
networks:
- monitoring
container_name: mon-cadvisor
command:
- '-url_base_prefix=/cadvisor'
env_file: .env
environment:
- CADVISOR_HEALTHCHECK_URL=http://localhost:8080/cadvisor/healthz
volumes:
- /:/rootfs:ro
- /var/run:/var/run:ro
- /sys:/sys:ro
- /var/lib/docker/:/var/lib/docker:ro
restart: unless-stopped
logging:
driver: gelf
options:
gelf-address: "udp://127.0.0.1:12201"
tag: "{{.Name}}"
blackbox:
image: prom/blackbox-exporter:latest
container_name: mon-blackbox
networks:
- app
restart: unless-stopped
logging:
driver: gelf
options:
gelf-address: "udp://127.0.0.1:12201"
tag: "{{.Name}}"
env_file: .env
healthcheck:
test: ["CMD", "wget", "--quiet", "--spider", "http://localhost:9115/-/healthy"]
interval: 30s
timeout: 5s
retries: 3
start_period: 10s
###############
# LOGS #
###############
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:7.17.23
container_name: log-elasticsearch
networks:
- monitoring
env_file: .env
environment:
- discovery.type=single-node
- ES_JAVA_OPTS=-Xms512m -Xmx512m
volumes:
- elastic-data:/usr/share/elasticsearch/data
- ./logs/elasticsearch:/setup
command: ["/setup/bootstrap.sh"]
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "-s", "localhost:9200"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
logstash:
image: docker.elastic.co/logstash/logstash:7.17.23
container_name: log-logstash
depends_on:
- elasticsearch
networks:
- monitoring
volumes:
- ./logs/logstash/pipeline:/usr/share/logstash/pipeline
ports:
- "12201:12201/udp"
restart: unless-stopped
env_file: .env
healthcheck:
test: ["CMD", "curl", "-f", "-s", "localhost:9600"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
kibana:
image: docker.elastic.co/kibana/kibana:7.17.23
container_name: log-kibana
depends_on:
- elasticsearch
networks:
- monitoring
- app
env_file: .env
environment:
- ELASTICSEARCH_HOSTS=http://elasticsearch:9200
- SERVER_PUBLICBASEURL=https://${NGINX_DOMAIN}:9090/kibana
- SERVER_BASEPATH=/kibana
- SERVER_REWRITEBASEPATH=true
- ELASTICSEARCH_USERNAME=elastic
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
volumes:
- ./logs/kibana:/setup
command: ["/setup/bootstrap.sh"]
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "-s", "localhost:5601/kibana/api/status"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
# Named volumes shared between services.
volumes:
  sqlite-volume:
  icons-volume:
  grafana-data:
  elastic-data: