-
Notifications
You must be signed in to change notification settings - Fork 4
Expand file tree
/
Copy pathdocker-compose.yaml
More file actions
267 lines (251 loc) · 8.19 KB
/
docker-compose.yaml
File metadata and controls
267 lines (251 loc) · 8.19 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
name: peerdb-postgres-clickhouse-example

# S3-compatible (MinIO) staging credentials, merged into the flow services
# and the minio container below via the &minio-config anchor.
x-minio-config: &minio-config
  PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_ACCESS_KEY_ID: _peerdb_minioadmin
  PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_SECRET_ACCESS_KEY: _peerdb_minioadmin
  PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_REGION: us-east-1
  PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_ENDPOINT_URL_S3: http://minio:9000
  PEERDB_CLICKHOUSE_AWS_S3_BUCKET_NAME: peerdbbucket

# Connection settings for the PeerDB catalog (metadata) Postgres instance,
# shared by the flow services and the UI.
x-catalog-config: &catalog-config
  PEERDB_CATALOG_HOST: catalog
  # Quoted so the value is passed as a string, as env vars expect.
  PEERDB_CATALOG_PORT: "5432"
  PEERDB_CATALOG_USER: postgres
  PEERDB_CATALOG_PASSWORD: postgres
  PEERDB_CATALOG_DATABASE: postgres

# Temporal connection and AWS credentials shared by the flow services.
x-flow-worker-env: &flow-worker-env
  TEMPORAL_HOST_PORT: temporal:7233
  # Left empty (null) — Compose passes the value through from the host
  # environment when one is set (e.g. for Temporal Cloud mTLS).
  TEMPORAL_CLIENT_CERT:
  TEMPORAL_CLIENT_KEY:
  PEERDB_TEMPORAL_NAMESPACE: default
  # For GCS, these will be your HMAC keys instead
  # For more information:
  # https://cloud.google.com/storage/docs/authentication/managing-hmackeys
  AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID:-}
  AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY:-}
  # For GCS, set this to "auto" without the quotes
  AWS_REGION: ${AWS_REGION:-}
  # For GCS, set this as: https://storage.googleapis.com
  AWS_ENDPOINT: ${AWS_ENDPOINT:-}
services:
  # PeerDB catalog database: stores PeerDB metadata and backs Temporal.
  catalog:
    image: postgres:18-alpine@sha256:b40d931bd0e7ce6eecc59a5a6ac3b3c04a01e559750e73e7086b6dbd7f8bf545
    command: -c config_file=/etc/postgresql.conf
    restart: unless-stopped
    ports:
      - "127.0.0.1:19901:5432"
    environment:
      PGUSER: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_DB: postgres
      POSTGRES_INITDB_ARGS: --locale=C.UTF-8
    extra_hosts:
      - "host.docker.internal:host-gateway"
    volumes:
      - pgdata:/var/lib/postgresql
      - ./volumes/catalog/postgresql.conf:/etc/postgresql.conf
      - ./volumes/catalog/pg_hba.conf:/etc/pg_hba.conf
      - ./volumes/catalog/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d
    healthcheck:
      test: [ "CMD", "pg_isready", "-d", "postgres", "-U", "postgres" ]
      interval: 10s
      timeout: 30s
      retries: 5
      start_period: 60s
temporal:
restart: unless-stopped
depends_on:
catalog:
condition: service_healthy
environment:
- DB=postgres12
- DB_PORT=5432
- POSTGRES_USER=postgres
- POSTGRES_PWD=postgres
- POSTGRES_SEEDS=catalog
- DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development-sql.yaml
image: temporalio/auto-setup:1.29@sha256:8240e81d9b6946e8cc9b86714a04733cec32b1332b8523961ad47700861036f7
ports:
- "127.0.0.1:17233:7233"
volumes:
- ./volumes/temporal-dynamicconfig:/etc/temporal/config/dynamicconfig
labels:
kompose.volume.type: configMap
temporal-admin-tools:
restart: unless-stopped
depends_on:
- temporal
environment:
- TEMPORAL_ADDRESS=temporal:7233
- TEMPORAL_CLI_ADDRESS=temporal:7233
- TEMPORAL_CLI_SHOW_STACKS=1
image: temporalio/admin-tools:1.25.2-tctl-1.18.1-cli-1.1.1@sha256:da0c7a7982b571857173ab8f058e7f139b3054800abb4dcb100445d29a563ee8
stdin_open: true
tty: true
entrypoint: /etc/temporal/entrypoint.sh
healthcheck:
test: [ "CMD", "tctl", "workflow", "list" ]
interval: 1s
timeout: 5s
retries: 30
volumes:
- ./scripts/mirror-name-search.sh:/etc/temporal/entrypoint.sh
temporal-ui:
restart: unless-stopped
depends_on:
- temporal
environment:
- TEMPORAL_ADDRESS=temporal:7233
- TEMPORAL_CORS_ORIGINS=http://localhost:13000
- TEMPORAL_CSRF_COOKIE_INSECURE=true
image: temporalio/ui:2.44.0@sha256:0b36e00aad304134eb908f41bf915934afe4eef1efc5b1750c65a5312ff3c9bb
ports:
- "127.0.0.1:18085:8080"
flow-api:
image: ghcr.io/peerdb-io/flow-api:stable-v0.36.9
restart: unless-stopped
ports:
- "127.0.0.1:18112:8112"
- "127.0.0.1:18113:8113"
environment:
<<: [ *catalog-config, *flow-worker-env, *minio-config ]
PEERDB_ALLOWED_TARGETS:
extra_hosts:
- "host.docker.internal:host-gateway"
depends_on:
temporal-admin-tools:
condition: service_healthy
flow-snapshot-worker:
image: ghcr.io/peerdb-io/flow-snapshot-worker:stable-v0.36.9
restart: unless-stopped
environment:
<<: [ *catalog-config, *flow-worker-env, *minio-config ]
depends_on:
temporal-admin-tools:
condition: service_healthy
flow-worker:
image: ghcr.io/peerdb-io/flow-worker:stable-v0.36.9
restart: unless-stopped
environment:
<<: [ *catalog-config, *flow-worker-env, *minio-config ]
extra_hosts:
- "host.docker.internal:host-gateway"
depends_on:
temporal-admin-tools:
condition: service_healthy
peerdb:
stop_signal: SIGINT
image: ghcr.io/peerdb-io/peerdb-server:stable-v0.36.9
restart: unless-stopped
environment:
<<: *catalog-config
PEERDB_PASSWORD:
PEERDB_FLOW_SERVER_ADDRESS: grpc://flow-api:8112
RUST_LOG: info
RUST_BACKTRACE: 1
ports:
- "127.0.0.1:19900:9900"
depends_on:
catalog:
condition: service_healthy
peerdb-ui:
image: ghcr.io/peerdb-io/peerdb-ui:stable-v0.36.9
restart: unless-stopped
ports:
- "127.0.0.1:13000:3000"
environment:
<<: *catalog-config
PEERDB_FLOW_SERVER_HTTP: http://flow-api:8113
NEXTAUTH_SECRET: __changeme__
NEXTAUTH_URL: http://localhost:13000
PEERDB_ALLOWED_TARGETS:
PEERDB_CLICKHOUSE_ALLOWED_DOMAINS:
PEERDB_EXPERIMENTAL_ENABLE_SCRIPTING: true
depends_on:
- flow-api
minio:
image: minio/minio:latest@sha256:14cea493d9a34af32f524e538b8346cf79f3321eff8e708c1e2960462bd8936e
restart: unless-stopped
volumes:
- minio-data:/data
ports:
- "127.0.0.1:19001:9000"
- "127.0.0.1:19002:36987"
environment:
<<: *minio-config
entrypoint: >
/bin/sh -c " export MINIO_ROOT_USER=$$PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_ACCESS_KEY_ID; export MINIO_ROOT_PASSWORD=$$PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_SECRET_ACCESS_KEY; minio server /data --console-address=":36987" & sleep 2; /usr/bin/mc alias set myminiopeerdb http://minio:9000 $$MINIO_ROOT_USER $$MINIO_ROOT_PASSWORD; /usr/bin/mc mb myminiopeerdb/$$PEERDB_CLICKHOUSE_AWS_S3_BUCKET_NAME; wait "
postgres:
hostname: postgres
image: ghcr.io/clickhouse/pg_clickhouse:18
restart: always
environment:
POSTGRES_USER: admin
POSTGRES_PASSWORD: password
POSTGRES_DB: postgres
ports:
- "127.0.0.1:15432:5432"
volumes:
- postgresdata:/var/lib/postgresql
- ./volumes/postgres/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d
command:
- "postgres"
- "-c" # your existing settings kept
- "wal_level=logical"
- "-c"
- "max_replication_slots=10"
- "-c"
- "max_wal_senders=10"
healthcheck:
test: [ "CMD-SHELL", "pg_isready -U admin -d postgres -h localhost" ]
interval: 10s
timeout: 5s
retries: 10
clickhouse:
image: clickhouse:25.8
user: "101:101"
hostname: clickhouse
volumes:
- clickhousedata:/var/lib/clickhouse
- ./volumes/clickhouse/etc/clickhouse-server/config.d/config.xml:/etc/clickhouse-server/config.d/config.xml
- ./volumes/clickhouse/etc/clickhouse-server/users.d/users.xml:/etc/clickhouse-server/users.d/users.xml
- ./volumes/clickhouse/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d
ports:
- "127.0.0.1:18123:8123"
- "127.0.0.1:19000:9000"
depends_on:
- postgres
expense-app:
image: node:24-alpine
working_dir: /app
volumes:
- ./sample/pg-expense-direct:/app
- sample_node_modules:/app/node_modules
ports:
- "127.0.0.1:18080:8080"
environment:
DB_HOST: postgres
DB_USER: admin
DB_PASSWORD: password
DB_NAME: postgres
DB_PORT: "5432"
SOURCE_SCHEMA: public
DESTINATION_SCHEMA: expense_ch
SEED_EXPENSE_ROWS: ${SEED_EXPENSE_ROWS:-10000000}
depends_on:
postgres:
condition: service_healthy
entrypoint: /bin/sh -c
command:
- |
npm install
npm run dev
extra_hosts:
- "host.docker.internal:host-gateway"
# Named volumes for persistent service data.
volumes:
  pgdata:
  postgresdata:
  clickhousedata:
  minio-data:
  sample_node_modules:

networks:
  default:
    name: peerdb_network