Commit 934bde2

multiple sparks

1 parent 6d8cebb commit 934bde2

4 files changed: +116 −30 lines changed

docker-compose.yml

Lines changed: 32 additions & 0 deletions

@@ -245,6 +245,8 @@ services:
     environment:
       - DATABASE_URL=postgresql://lightning-rgs:docker@postgres:5432/sparkoperator_0?sslmode=disable
       - RPCPASSWORD=$RPCPASSWORD
+      - SPARK_INDEX=0
+      - FROST_SIGNER_SOCKET=unix:///tmp/frost_0.sock
     volumes:
       - ~/volumes/spark:/home/spark
       - ./spark-config.yaml:/config/so_config.yaml:ro

@@ -256,6 +258,36 @@ services:
       interval: 30s
       timeout: 10s
       retries: 3
+  spark2:
+    container_name: "spark2"
+    build:
+      context: ./spark
+    user: "0:1000"
+    logging: *default-logging
+    restart: always
+    stop_grace_period: 1m
+    depends_on:
+      - bitcoind
+      - postgres
+    ports:
+      - "9737:9735" # Lightning port (offset to avoid conflict with spark1)
+      - "10011:10010" # gRPC port (offset to avoid conflict with spark1)
+    environment:
+      - DATABASE_URL=postgresql://lightning-rgs:docker@postgres:5432/sparkoperator_1?sslmode=disable
+      - RPCPASSWORD=$RPCPASSWORD
+      - SPARK_INDEX=1
+      - FROST_SIGNER_SOCKET=unix:///tmp/frost_1.sock
+    volumes:
+      - ~/volumes/spark2:/home/spark
+      - ./spark-config.yaml:/config/so_config.yaml:ro
+      - ./spark-entrypoint.sh:/usr/local/bin/spark-entrypoint.sh:ro
+      - ./keygen.py:/usr/local/bin/keygen.py:ro
+    entrypoint: ["/usr/local/bin/spark-entrypoint.sh"]
+    healthcheck:
+      test: ["CMD", "grpc_health_probe", "-addr=localhost:10010"]
+      interval: 30s
+      timeout: 10s
+      retries: 3
   web:
     container_name: "mempool_frontend"
     environment:
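
The spark2 service mirrors the existing spark service, with host ports offset by one and its own volume, database, index, and signer socket. A minimal sketch for bringing up and checking both operators; it assumes the first operator's in-container gRPC port is 10009 (per the SPARK_INDEX-based port logic in spark-entrypoint.sh), mirroring the compose healthchecks:

    docker compose up -d --build spark spark2
    docker compose ps spark spark2
    # gRPC health checks, one per operator
    docker compose exec spark grpc_health_probe -addr=localhost:10009
    docker compose exec spark2 grpc_health_probe -addr=localhost:10010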

reset-spark.sh

Lines changed: 19 additions & 16 deletions

@@ -2,29 +2,32 @@

 echo "=== Resetting Spark Environment ==="

-# Stop the spark container
-echo "Stopping spark container..."
-docker compose stop spark
+# Stop both spark containers
+echo "Stopping spark containers..."
+docker compose stop spark spark2

-# Remove the spark container completely
-echo "Removing spark container..."
-docker-compose rm -f spark
+# Remove both spark containers completely
+echo "Removing spark containers..."
+docker-compose rm -f spark spark2

-# Drop the spark database
-echo "Dropping spark database..."
+# Drop both spark databases
+echo "Dropping spark databases..."
 docker-compose exec postgres psql -U lightning-rgs -d postgres -c "DROP DATABASE IF EXISTS sparkoperator_0;"
+docker-compose exec postgres psql -U lightning-rgs -d postgres -c "DROP DATABASE IF EXISTS sparkoperator_1;"

-# Recreate the database
-echo "Recreating spark database..."
+# Recreate both databases
+echo "Recreating spark databases..."
 docker-compose exec postgres psql -U lightning-rgs -d postgres -c "CREATE DATABASE sparkoperator_0;"
+docker-compose exec postgres psql -U lightning-rgs -d postgres -c "CREATE DATABASE sparkoperator_1;"

-# Clean up the spark volume
-echo "Cleaning spark volume..."
+# Clean up both spark volumes
+echo "Cleaning spark volumes..."
 sudo rm -rf ~/volumes/spark/*
+sudo rm -rf ~/volumes/spark2/*

-# Restart spark container
-echo "Starting fresh spark container..."
-docker-compose up -d --no-deps --build --force-recreate spark
+# Restart both spark containers
+echo "Starting fresh spark containers..."
+docker-compose up -d --no-deps --build --force-recreate spark spark2

 echo "=== Spark reset complete! ==="
-echo "You can monitor the logs with: docker compose logs -f spark"
+echo "You can monitor the logs with: docker compose logs -f spark spark2"
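
A hedged usage sketch: after a reset, the recreated databases and fresh containers can be checked with the same tools the script already uses (psql through the postgres service, and compose logs):

    ./reset-spark.sh
    # confirm both operator databases exist again
    docker-compose exec postgres psql -U lightning-rgs -d postgres -c "\l" | grep sparkoperator
    # follow both operators as they come up
    docker compose logs -f spark spark2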

spark-config.yaml

Lines changed: 9 additions & 5 deletions

@@ -1,6 +1,5 @@
 dkg:
-  min_available_keys: 1 # Single node deployment
-  threshold: 1 # Only need 1 signature
+  min_available_keys: 1 # Two node deployment

 bitcoind:
   regtest:

@@ -21,15 +20,20 @@ database:
   pool_max_conn_idle_time: 30s
   pool_health_check_period: 15s

-# Single node configuration - only one operator
+# Two operator configuration for DKG
 operators:
   - address: "0.0.0.0:10009"
     external_address: "spark:10009"
+    address_dkg: "spark:10009"
     identity_public_key: "0322ca18fc489ae25418a0e768273c2c61cabb823edfb14feb891e9bec62016510"
+  - address: "0.0.0.0:10010"
+    external_address: "spark2:10010"
+    address_dkg: "spark2:10010"
+    identity_public_key: "0322ca18fc489ae25418a0e768273c2c61cabb823edfb14feb891e9bec62016511"

-# Frost signer configuration
+# Frost signer configuration (will be substituted by entrypoint)
 frost:
-  signer_socket: "unix:///tmp/frost_0.sock"
+  signer_socket: "${FROST_SIGNER_SOCKET}"

 lrc20:
   regtest:
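
The commit does not show how ${FROST_SIGNER_SOCKET} is expanded; the new comment only says the entrypoint substitutes it, and the body of create_final_config() is not part of this diff. A minimal, hypothetical sketch of that substitution step, assuming a plain sed replacement of the environment variable set in docker-compose.yml:

    # hypothetical step inside spark-entrypoint.sh (create_final_config)
    sed "s|\${FROST_SIGNER_SOCKET}|${FROST_SIGNER_SOCKET}|g" \
        /config/so_config.yaml > /home/spark/so_config.yaml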

spark-entrypoint.sh

Lines changed: 56 additions & 9 deletions

@@ -49,8 +49,12 @@ run_migrations() {

 # Function to start frost signer
 start_frost_signer() {
+    local index=${SPARK_INDEX:-0}
+    local signer_socket="/tmp/frost_${index}.sock"
+
     echo "Starting Frost signer..."
-    spark-frost-signer -u /tmp/frost_0.sock &
+    echo "Signer socket: $signer_socket"
+    spark-frost-signer -u $signer_socket &
     SIGNER_PID=$!
     echo "Frost signer started with PID: $SIGNER_PID"

@@ -66,9 +70,10 @@ start_frost_signer() {

 # Function to create identity key and operators config
 create_identity_and_config() {
-    local key_file="/home/spark/operator_0.key"
+    local index=${SPARK_INDEX:-0}
+    local key_file="/home/spark/operator_${index}.key"
     local operators_file="/home/spark/operators.json"
-    local keypair_file="/home/spark/keypair.txt"
+    local keypair_file="/home/spark/keypair_${index}.txt"

     if [ ! -f "$keypair_file" ]; then
         echo "Generating new secp256k1 key pair..."

@@ -110,15 +115,49 @@ create_identity_and_config() {
     chmod 600 "$key_file"

     echo "Creating operators configuration..."
-    cat > "$operators_file" << EOF
+    if [ "$index" = "0" ]; then
+        # Generate second operator's key for the JSON (using a different seed)
+        local public_key_2=$(python3 /usr/local/bin/keygen.py | grep "PUBLIC:" | cut -d: -f2)
+        cat > "$operators_file" << EOF
+[
+  {
+    "id": 0,
+    "address": "0.0.0.0:10009",
+    "external_address": "spark:10009",
+    "address_dkg": "spark:10009",
+    "identity_public_key": "$public_key"
+  },
+  {
+    "id": 1,
+    "address": "0.0.0.0:10010",
+    "external_address": "spark2:10010",
+    "address_dkg": "spark2:10010",
+    "identity_public_key": "$public_key_2"
+  }
+]
+EOF
+    else
+        # For operator 1, create the same operators.json with both operators
+        # But we need to read operator 0's public key from shared config
+        cat > "$operators_file" << EOF
 [
   {
+    "id": 0,
     "address": "0.0.0.0:10009",
     "external_address": "spark:10009",
+    "address_dkg": "spark:10009",
+    "identity_public_key": "0322ca18fc489ae25418a0e768273c2c61cabb823edfb14feb891e9bec62016510"
+  },
+  {
+    "id": 1,
+    "address": "0.0.0.0:10010",
+    "external_address": "spark2:10010",
+    "address_dkg": "spark2:10010",
     "identity_public_key": "$public_key"
   }
 ]
 EOF
+    fi
     echo "Operators config created"
 }

@@ -134,23 +173,31 @@ create_final_config() {

 # Function to start spark operator
 start_spark_operator() {
+    local index=${SPARK_INDEX:-0}
+    local port=$((10009 + index))
+    local key_file="/home/spark/operator_${index}.key"
+    local signer_socket="unix:///tmp/frost_${index}.sock"
+
     echo "Starting Spark operator..."
+    echo "Operator index: $index"
+    echo "Port: $port"
     echo "Database URL being passed: '$DATABASE_URL'"
     echo "Database URL starts with postgresql: $(echo "$DATABASE_URL" | grep -q "^postgresql" && echo "YES" || echo "NO")"
     echo "Config file database section:"
     grep -A 10 "database:" /home/spark/so_config.yaml

     echo "Starting with command:"
-    echo "spark-operator -config /home/spark/so_config.yaml -index 0 -port 10009 -database '$DATABASE_URL' -signer unix:///tmp/frost_0.sock -key /home/spark/operator_0.key -operators /home/spark/operators.json -supported-networks regtest -local -log-level debug"
+    echo "spark-operator -config /home/spark/so_config.yaml -index $index -port $port -database '$DATABASE_URL' -signer $signer_socket -key $key_file -operators /home/spark/operators.json -threshold 2 -supported-networks regtest -local -log-level debug"

     exec spark-operator \
         -config /home/spark/so_config.yaml \
-        -index 0 \
-        -port 10009 \
+        -index $index \
+        -port $port \
         -database "$DATABASE_URL" \
-        -signer unix:///tmp/frost_0.sock \
-        -key /home/spark/operator_0.key \
+        -signer $signer_socket \
+        -key $key_file \
         -operators /home/spark/operators.json \
+        -threshold 2 \
         -supported-networks regtest \
         -local \
         -log-level debug
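
To see what the entrypoint actually produced for each operator, the generated files can be inspected in place. A small sketch using only paths that appear in the script (operators.json and the per-index key and keypair files):

    docker compose exec spark  cat /home/spark/operators.json
    docker compose exec spark2 cat /home/spark/operators.json
    docker compose exec spark  ls -l /home/spark/operator_0.key /home/spark/keypair_0.txt
    docker compose exec spark2 ls -l /home/spark/operator_1.key /home/spark/keypair_1.txt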
