-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path compose.yml
More file actions
executable file
·153 lines (144 loc) · 3.55 KB
/
compose.yml
File metadata and controls
executable file
·153 lines (144 loc) · 3.55 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
# ===================
# ⒸAngelaMos | 2026
# Vuemantics - Dev Compose
# ===================

# Project name; default aligned with the container_name defaults below
# (was "compose", which contradicted every other "vuemantics" default).
name: ${APP_NAME:-vuemantics}

services:
  # PostgreSQL 16 with the pgvector extension (embedding similarity search).
  postgres:
    image: pgvector/pgvector:pg16
    container_name: ${APP_NAME:-vuemantics}-postgres
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-devpassword}
      POSTGRES_DB: multimodal_search_dev
    ports:
      # Non-standard host port to avoid clashing with a local postgres.
      - "${POSTGRES_HOST_PORT:-5455}:5432"
    volumes:
      - postgres_data_dev:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - multimodal-dev

  # Redis 7 — cache / task queue backing store.
  redis:
    image: redis:7-alpine
    container_name: ${APP_NAME:-vuemantics}-redis
    ports:
      - "${REDIS_HOST_PORT:-6361}:6379"
    volumes:
      - redis_data_dev:/data
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - multimodal-dev

  # Ollama LLM server with NVIDIA GPU passthrough.
  # Both `runtime: nvidia` (legacy runtime path) and the
  # deploy.resources.reservations.devices form are declared; the latter is
  # the Compose-spec GPU request honored by recent Docker versions.
  ollama:
    image: ollama/ollama:latest
    container_name: ${APP_NAME:-vuemantics}-ollama
    runtime: nvidia
    ports:
      - "${OLLAMA_HOST_PORT:-11435}:11434"
    deploy:
      resources:
        limits:
          cpus: '8.0'
          memory: 24G
        reservations:
          cpus: '4.0'
          memory: 8G
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    volumes:
      - ollama_data_dev:/root/.ollama
    networks:
      - multimodal-dev
    restart: unless-stopped
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      # Keep loaded models warm for 30 minutes between requests.
      - OLLAMA_KEEP_ALIVE=30m
      - OLLAMA_FLASH_ATTENTION=1
    healthcheck:
      test: ["CMD", "ollama", "list"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 15s
    logging:
      # Cap log growth: 3 files x 10 MB.
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  # FastAPI backend — bind-mounted source with uvicorn --reload for hot reload.
  backend:
    build:
      context: .
      dockerfile: infra/dev/docker/fastapi.dev
    container_name: ${APP_NAME:-vuemantics}-backend
    ports:
      - "${BACKEND_HOST_PORT:-8776}:8000"
    volumes:
      - ./backend:/app
      # Anonymous volume shields the image's .venv from the bind mount above.
      - /app/.venv
      - ./storage:/storage
    env_file:
      - ./.env
    environment:
      - PYTHONUNBUFFERED=1
      # Polling-based file watching: inotify does not cross the bind mount
      # reliably on all hosts (e.g. macOS/Windows).
      - WATCHFILES_FORCE_POLLING=true
    extra_hosts:
      - "host.docker.internal:host-gateway"
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
      ollama:
        condition: service_healthy
    command: uv run uvicorn main:app --host 0.0.0.0 --port 8000 --reload --timeout-graceful-shutdown 2
    networks:
      - multimodal-dev

  # Vite frontend dev server with hot module replacement.
  frontend:
    build:
      context: ./frontend
      dockerfile: ../infra/dev/docker/vite.dev
    container_name: ${APP_NAME:-vuemantics}-frontend
    ports:
      - "${FRONTEND_HOST_PORT:-5276}:5173"
    volumes:
      - ./frontend:/app
      # Anonymous volume shields the image's node_modules from the bind mount.
      - /app/node_modules
    environment:
      # Relative API base; nginx proxies /api to the backend.
      - VITE_API_URL=/api
      - CHOKIDAR_USEPOLLING=true
    networks:
      - multimodal-dev

  # nginx reverse proxy — single dev entry point routing to backend/frontend
  # and serving uploaded files read-only.
  nginx:
    image: nginx:alpine
    container_name: ${APP_NAME:-vuemantics}-nginx
    ports:
      - "${NGINX_HOST_PORT:-856}:80"
    volumes:
      - ./infra/dev/nginx/nginx.dev.conf:/etc/nginx/nginx.conf:ro
      - ./infra/dev/nginx/dev.nginx:/etc/nginx/conf.d/default.conf:ro
      - ./storage/uploads:/var/www/uploads:ro
    depends_on:
      - backend
      - frontend
    networks:
      - multimodal-dev

networks:
  multimodal-dev:
    driver: bridge

volumes:
  # Pre-existing volumes with fixed names so data survives project renames.
  postgres_data_dev:
    external: true
    name: dev_postgres_data_dev
  redis_data_dev:
  ollama_data_dev:
    external: true
    name: dev_ollama_data_dev