ft_transcendence/docker-compose.yml
Maieul BOYER 2ed524872b feat(docker): Multi-stage Docker now fetches deps once
Split the Dockerfile into three separate Dockerfiles so that dependencies
are downloaded only once.

This makes the build a bit faster, since all deps are fetched once at the
start.

It also means the frontend container no longer needs to be run, as its
files are embedded directly into the nginx container.

Finally, this removes the extra files, since bind mounts work fine, and
drops the entrypoint.sh file, which is best avoided anyway.
2025-12-30 19:02:07 +01:00
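The compose file below wires this up with BuildKit named contexts: pnpm_base and pnpm_deps are built once and then handed to every other build. As a rough sketch of what the two shared Dockerfiles might contain (the file names come from the compose file; their contents here are an assumption, not the actual repository files):

# @dockerfiles/pnpm.Dockerfile -- hypothetical: a base image with pnpm available
FROM node:22-alpine
RUN corepack enable
WORKDIR /app

# @dockerfiles/deps.Dockerfile -- hypothetical: download all workspace deps once
# `FROM pnpm_base` resolves to the named context passed via additional_contexts
FROM pnpm_base
COPY pnpm-lock.yaml pnpm-workspace.yaml package.json ./
RUN pnpm fetch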


networks:
  app:
    driver: bridge
  monitoring:
    driver: bridge

services:
  ##########
  #  DEPS  #
  ##########
  pnpm_base:
    build:
      context: ./src
      dockerfile: '@dockerfiles/pnpm.Dockerfile'
    restart: on-failure:3
  pnpm_deps:
    build:
      context: ./src
      dockerfile: '@dockerfiles/deps.Dockerfile'
      additional_contexts:
        pnpm_base: "service:pnpm_base"
    restart: on-failure:3
  frontend:
    build:
      context: ./frontend
      additional_contexts:
        pnpm_base: "service:pnpm_base"
    container_name: app-frontend
    restart: on-failure:3
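  # `additional_contexts` entries with a `service:` prefix make Compose build
  # that service's image first and expose it to this build as a named context,
  # so the Dockerfile can reference it directly (e.g. `FROM pnpm_base` or
  # `COPY --from=pnpm_deps`). The same mechanism embeds the built frontend
  # into the nginx image below, which is why the frontend no longer has to
  # run as a container.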
  ############
  # SERVICES #
  ############
  nginx:
    build:
      context: ./nginx
      additional_contexts:
        frontend: "service:frontend"
    container_name: app-nginx
    restart: always
    networks:
      - app
      - monitoring
    ports:
      - '8888:443'
      - '9090:8443'
    volumes:
      # if you need to share files with nginx, this is the place to mount them.
      - static-volume:/volumes/static
    environment:
      # fine to leave as-is for development; this domain is an alias for `localhost`
      - NGINX_DOMAIN=local.maix.me
    logging:
      driver: gelf
      options:
        gelf-address: "udp://127.0.0.1:12201"
        tag: "{{.Name}}"
  auth:
    build:
      context: ./src/
      args:
        - SERVICE=auth
      additional_contexts:
        pnpm_base: "service:pnpm_base"
        pnpm_deps: "service:pnpm_deps"
    container_name: app-auth
    restart: always
    networks:
      - app
    volumes:
      - sqlite-volume:/volumes/database
      - static-volume:/volumes/static
      - ./src/auth/config:/config
    environment:
      - JWT_SECRET=KRUGKIDROVUWG2ZAMJZG653OEBTG66BANJ2W24DTEBXXMZLSEB2GQZJANRQXU6JA
    logging:
      driver: gelf
      options:
        gelf-address: "udp://127.0.0.1:12201"
        tag: "{{.Name}}"
  ###############
  # TIC-TAC-TOE #
  ###############
  tic-tac-toe:
    build:
      context: ./src/
      args:
        - SERVICE=tic-tac-toe
      additional_contexts:
        pnpm_base: "service:pnpm_base"
        pnpm_deps: "service:pnpm_deps"
    container_name: app-tic-tac-toe
    restart: always
    networks:
      - app
    volumes:
      - sqlite-volume:/volumes/database
      - static-volume:/volumes/static
    environment:
      - JWT_SECRET=KRUGKIDROVUWG2ZAMJZG653OEBTG66BANJ2W24DTEBXXMZLSEB2GQZJANRQXU6JA
    logging:
      driver: gelf
      options:
        gelf-address: "udp://127.0.0.1:12201"
        tag: "{{.Name}}"
  ###############
  #    CHAT     #
  ###############
  chat:
    build:
      context: ./src/
      args:
        - SERVICE=chat
      additional_contexts:
        pnpm_base: "service:pnpm_base"
        pnpm_deps: "service:pnpm_deps"
    container_name: app-chat
    restart: always
    networks:
      - app
    volumes:
      - sqlite-volume:/volumes/database
      - static-volume:/volumes/static
    environment:
      - JWT_SECRET=KRUGKIDROVUWG2ZAMJZG653OEBTG66BANJ2W24DTEBXXMZLSEB2GQZJANRQXU6JA
      - PROVIDER_FILE=/extra/providers.toml
      - SESSION_MANAGER=${SESSION_MANAGER}
    logging:
      driver: gelf
      options:
        gelf-address: "udp://127.0.0.1:12201"
        tag: "{{.Name}}"
  user:
    build:
      context: ./src/
      args:
        - SERVICE=user
      additional_contexts:
        pnpm_base: "service:pnpm_base"
        pnpm_deps: "service:pnpm_deps"
    container_name: app-user
    restart: always
    networks:
      - app
    volumes:
      - sqlite-volume:/volumes/database
      - static-volume:/volumes/static
    environment:
      - JWT_SECRET=KRUGKIDROVUWG2ZAMJZG653OEBTG66BANJ2W24DTEBXXMZLSEB2GQZJANRQXU6JA
    logging:
      driver: gelf
      options:
        gelf-address: "udp://127.0.0.1:12201"
        tag: "{{.Name}}"
  ###############
  # MONITORING  #
  ###############
  grafana:
    container_name: mon-grafana
    image: grafana/grafana-enterprise
    restart: always
    networks:
      - app
      - monitoring
    depends_on:
      - prometheus
    volumes:
      - ./monitoring/grafana/alerting:/etc/grafana/provisioning/alerting
      - ./monitoring/grafana/datasources:/etc/grafana/provisioning/datasources
      - ./monitoring/grafana/dashboards-config:/etc/grafana/provisioning/dashboards
      - ./monitoring/grafana/dashboards:/var/lib/grafana/dashboards
      - grafana-data:/var/lib/grafana
    environment:
      # fine to leave as-is for development; this domain is an alias for `localhost`
      - NGINX_DOMAIN=local.maix.me
      - GF_LOG_LEVEL=warn
      - GF_SERVER_ROOT_URL=https://local.maix.me:9090/grafana/
      - GF_SECURITY_ADMIN_USER=${GRAFANA_ADMIN_USER}
      - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASS}
    logging:
      driver: gelf
      options:
        gelf-address: "udp://127.0.0.1:12201"
        tag: "{{.Name}}"
    healthcheck:
      test: ["CMD-SHELL", "curl -f -s http://localhost:3000/api/health || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s
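  # Grafana loads the mounted provisioning directories (datasources,
  # dashboards, alerting) automatically on startup, so dashboards and
  # datasources need no manual setup in the UI.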
  prometheus:
    image: prom/prometheus:latest
    container_name: mon-prometheus
    networks:
      - app
      - monitoring
    volumes:
      - ./monitoring/prometheus:/etc/prometheus/
    restart: unless-stopped
    logging:
      driver: gelf
      options:
        gelf-address: "udp://127.0.0.1:12201"
        tag: "{{.Name}}"
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--spider", "http://localhost:9090/-/healthy"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 30s
  cadvisor:
    image: gcr.io/cadvisor/cadvisor:latest
    networks:
      - monitoring
    container_name: mon-cadvisor
    command:
      - '-url_base_prefix=/cadvisor'
    environment:
      - CADVISOR_HEALTHCHECK_URL=http://localhost:8080/cadvisor/healthz
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:ro
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
    restart: unless-stopped
    logging:
      driver: gelf
      options:
        gelf-address: "udp://127.0.0.1:12201"
        tag: "{{.Name}}"
  blackbox:
    image: prom/blackbox-exporter:latest
    container_name: mon-blackbox
    networks:
      - app
    restart: unless-stopped
    logging:
      driver: gelf
      options:
        gelf-address: "udp://127.0.0.1:12201"
        tag: "{{.Name}}"
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--spider", "http://localhost:9115/-/healthy"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 10s
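  # Log pipeline: containers -> GELF driver -> logstash (udp 12201) ->
  # elasticsearch -> kibana, with kibana served behind nginx under /kibana.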
  ###############
  #    LOGS     #
  ###############
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.17.23
    container_name: log-elasticsearch
    networks:
      - monitoring
    environment:
      - discovery.type=single-node
      - ES_JAVA_OPTS=-Xms512m -Xmx512m
      - ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
    volumes:
      - elastic-data:/usr/share/elasticsearch/data
      - ./logs/elasticsearch:/setup
    command: ["/setup/bootstrap.sh"]
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "-s", "localhost:9200"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s
  logstash:
    image: docker.elastic.co/logstash/logstash:7.17.23
    container_name: log-logstash
    depends_on:
      - elasticsearch
    networks:
      - monitoring
    volumes:
      - ./logs/logstash/pipeline:/usr/share/logstash/pipeline
    ports:
      - "12201:12201/udp"
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "-s", "localhost:9600"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s
  kibana:
    image: docker.elastic.co/kibana/kibana:7.17.23
    container_name: log-kibana
    depends_on:
      - elasticsearch
    networks:
      - monitoring
      - app
    environment:
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
      - SERVER_PUBLICBASEURL=https://local.maix.me:9090/kibana
      - SERVER_BASEPATH=/kibana
      - SERVER_REWRITEBASEPATH=true
      - ELASTICSEARCH_USERNAME=elastic
      - ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
    volumes:
      - ./logs/kibana:/setup
    command: ["/setup/bootstrap.sh"]
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "-s", "localhost:5601/kibana/api/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s
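  # Both elasticsearch and kibana replace their default command with a
  # bootstrap.sh mounted from ./logs/, presumably to do first-run setup
  # before handing off to the stock entrypoint.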
volumes:
  sqlite-volume:
  static-volume:
  grafana-data:
  elastic-data:
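All four application services (auth, tic-tac-toe, chat, user) build from the same Dockerfile under ./src, selected by the SERVICE build arg. A minimal sketch of how that shared Dockerfile could consume the two dependency contexts, assuming a pnpm workspace whose package names match the SERVICE values (the store path and commands are assumptions; only the context names and the SERVICE arg come from the compose file above):

# src Dockerfile (hypothetical) -- one file builds every service via ARG SERVICE
FROM pnpm_base
ARG SERVICE
# keep the service name around at runtime for the start command
ENV SERVICE=${SERVICE}
WORKDIR /app
# reuse the package store fetched once in the deps image instead of
# re-downloading it for every service (store path is an assumption)
COPY --from=pnpm_deps /root/.local/share/pnpm/store /root/.local/share/pnpm/store
COPY . .
RUN pnpm install --offline --filter "${SERVICE}"
CMD pnpm --filter "${SERVICE}" start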