-
Notifications
You must be signed in to change notification settings - Fork 69
Expand file tree
/
Copy pathstack
More file actions
executable file
·2144 lines (1830 loc) · 79.9 KB
/
stack
File metadata and controls
executable file
·2144 lines (1830 loc) · 79.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/usr/bin/env bash
# stack — developer helper script for building and running the Helix dev stack.
# Strict mode: exit on error, error on unset variables, fail pipelines
# mid-stream; IFS limited to newline/tab so spaces do not word-split.
set -euo pipefail
IFS=$'\n\t'
# DIR: directory containing this script. PROJECTS_ROOT: its parent, where
# sibling checkouts live (the Zed fork is expected at "$PROJECTS_ROOT/zed").
export DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
export PROJECTS_ROOT="$(dirname "$DIR")"
export HELIX_HOST_HOME="$DIR"
# Tunables, all overridable from the environment:
#   TMUX_SESSION     name of the tmux session used by start-tmux / stop
#   WITH_RUNNER      non-empty => include the runner compose profile
#   WITH_DEMOS       non-empty => include the demos compose profile
#   STOP_POSTGRES / STOP_PGVECTOR  non-empty => also stop these on './stack stop'
#   WIPE_SLOTS       non-empty => wipe runner slots from the DB on start
#                    NOTE(review): the default "0" is itself non-empty, so the
#                    '[[ -n "$WIPE_SLOTS" ]]' check in _start_services appears
#                    to fire on every start — confirm intended default.
#   COMPOSE_PROFILES docker compose profiles ("" => sandbox auto-detection)
export TMUX_SESSION=${TMUX_SESSION:="helix"}
export WITH_RUNNER=${WITH_RUNNER:=""}
export WITH_DEMOS=${WITH_DEMOS:=""}
export STOP_POSTGRES=${STOP_POSTGRES:=""}
export STOP_PGVECTOR=${STOP_PGVECTOR:=""}
export WIPE_SLOTS=${WIPE_SLOTS:="0"}
export COMPOSE_PROFILES=${COMPOSE_PROFILES:=""}
# Smart --load logic has been consolidated into the docker wrapper
# (desktop/shared/docker-buildx-wrapper.sh), which intercepts both
# 'docker build' and 'docker buildx build' and applies smart --load
# for remote builders. The stack script just calls 'docker buildx build'
# directly — the wrapper handles the rest. See the design doc at
# design/2026-02-21-shared-buildkit-cache.md for details.
#
# On the host (without the wrapper), 'docker buildx build' uses the local
# BuildKit directly — which is fine since there's no remote builder overhead.
# Desktop categories for build-sandbox
# Production desktops are always built; experimental require opt-in via EXPERIMENTAL_DESKTOPS
# Note: Using arrays because IFS is set to '\n\t' (no space splitting)
PRODUCTION_DESKTOPS=(ubuntu)
AVAILABLE_EXPERIMENTAL_DESKTOPS=(sway zorin xfce kde)
# EXPERIMENTAL_DESKTOPS can be set as space-separated string, converted to array below
# Configure host networking for Docker-in-Docker support.
# Enables the route_localnet / ip_forward sysctls when not already on, and
# raises inotify limits for Zed file watching when below target. All sysctl
# writes are best-effort (sudo failures are swallowed).
function setup_dev_networking() {
  echo "🌐 Configuring networking for Docker-in-Docker support..."
  # Read the current route_localnet flag; empty string if unreadable.
  local route_flag
  route_flag=$(cat /proc/sys/net/ipv4/conf/all/route_localnet 2>/dev/null) || route_flag=""
  if [[ "$route_flag" == "1" ]]; then
    echo "✅ Docker-in-Docker networking already configured"
  else
    # route_localnet: Allow 127.x.x.x addresses on non-loopback interfaces
    # Required for localhost:PORT forwarding to container networks via DNAT
    sudo sysctl -w net.ipv4.conf.all.route_localnet=1 >/dev/null 2>&1 || true
    sudo sysctl -w net.ipv4.conf.default.route_localnet=1 >/dev/null 2>&1 || true
    sudo sysctl -w net.ipv4.ip_forward=1 >/dev/null 2>&1 || true
    echo "✅ Docker-in-Docker networking configured (route_localnet, ip_forward)"
  fi
  # Also ensure inotify limits are sufficient for Zed file watching
  local watch_target=1048576
  local watch_current
  watch_current=$(cat /proc/sys/fs/inotify/max_user_watches 2>/dev/null || echo "0")
  if [[ "$watch_current" -lt "$watch_target" ]]; then
    echo "📁 Increasing inotify limits for Zed file watching..."
    sudo sysctl -w fs.inotify.max_user_watches=$watch_target >/dev/null 2>&1 || true
    sudo sysctl -w fs.inotify.max_user_instances=8192 >/dev/null 2>&1 || true
    echo "✅ inotify limits increased"
  fi
}
# Helper function to check for GPU and set appropriate runner profile.
# Exports:
#   RUNNER_CONTAINER / RUNNER_PROFILE  - compose service + '--profile' arg
#   DEV_CPU_ONLY_CMD / VLLM_ENV_VARS   - env prefixes for CPU-only inference
# FORCE_CPU (non-empty) bypasses detection entirely.
function setup_runner_profile() {
  export FORCE_CPU=${FORCE_CPU:=""}
  # Step 1: classify the host. Detection order matters: forced CPU first,
  # then NVIDIA, AMD (ROCm), Intel, and finally plain CPU fallback.
  local accel
  if [[ -n "$FORCE_CPU" ]]; then
    accel="forced_cpu"
    echo "💻 FORCE_CPU is set, forcing CPU mode regardless of GPU detection"
  elif command -v nvidia-smi &> /dev/null && nvidia-smi &> /dev/null; then
    accel="nvidia"
    echo "🚀 NVIDIA GPU detected, using GPU support"
  elif [[ -e "/dev/kfd" ]] && [[ -d "/dev/dri" ]] && command -v lspci &> /dev/null && lspci | grep -iE "(VGA|3D|Display).*AMD" &> /dev/null; then
    accel="amd"
    echo "🚀 AMD GPU detected (ROCm), using AMD GPU support"
  elif [[ -d "/dev/dri" ]] && command -v lspci &> /dev/null && lspci | grep -iE "(VGA|3D|Display).*(Intel|Iris)" &> /dev/null; then
    # Intel: video encoding works (QSV) but no GPU compute for LLM inference.
    accel="intel"
    echo "🖥️ Intel GPU detected - sandbox/video encoding supported (QSV), external LLM recommended for AI inference"
  else
    accel="cpu"
    echo "❌ No supported GPU detected, running without GPU support"
  fi
  # Step 2: map the classification onto the exported settings.
  case "$accel" in
    nvidia) export RUNNER_CONTAINER="runner_gpu" ;;
    amd)    export RUNNER_CONTAINER="runner_gpu_amd" ;;
    *)      export RUNNER_CONTAINER="runner" ;;
  esac
  export RUNNER_PROFILE="--profile $RUNNER_CONTAINER"
  if [[ "$accel" == "nvidia" || "$accel" == "amd" ]]; then
    export DEV_CPU_ONLY_CMD=""
    export VLLM_ENV_VARS=""
  else
    export DEV_CPU_ONLY_CMD="DEVELOPMENT_CPU_ONLY=true "
    export VLLM_ENV_VARS="VLLM_DEVICE=cpu VLLM_LOGGING_LEVEL=DEBUG"
  fi
}
# Helper function to determine sandbox service and container names based on COMPOSE_PROFILES
# Sets SANDBOX_SERVICE (docker-compose service name) and SANDBOX_CONTAINER (docker container name)
function get_sandbox_names() {
  # First matching profile substring wins; anything else falls through to
  # the NVIDIA default (code-nvidia profile).
  case "${COMPOSE_PROFILES:-}" in
    *code-software*)
      export SANDBOX_SERVICE="sandbox-software"
      export SANDBOX_CONTAINER="helix-sandbox-software-1"
      ;;
    *code-amd-intel*)
      export SANDBOX_SERVICE="sandbox-amd-intel"
      export SANDBOX_CONTAINER="helix-sandbox-amd-intel-1"
      ;;
    *code-macos*)
      export SANDBOX_SERVICE="sandbox-macos"
      export SANDBOX_CONTAINER="helix-sandbox-macos-1"
      ;;
    *)
      export SANDBOX_SERVICE="sandbox-nvidia"
      export SANDBOX_CONTAINER="helix-sandbox-nvidia-1"
      ;;
  esac
}
# Helper function to detect GPU type and set appropriate sandbox profile
# Sets COMPOSE_PROFILES to include 'code-nvidia' (NVIDIA) or 'code-amd-intel' (AMD/Intel) if not already set
# Precedence: explicit environment value > explicit .env entry > auto-detection.
# Side effects:
#   - sources "$DIR/.env" when present (its values can overwrite exports)
#   - force-removes any sandbox containers that conflict with the chosen one
#   - always ends by calling get_sandbox_names to export SANDBOX_SERVICE/SANDBOX_CONTAINER
function setup_sandbox_profile() {
  # Check if COMPOSE_PROFILES was explicitly set to a non-empty value in environment
  # (empty string triggers auto-detection, non-empty respects user's choice)
  local env_was_set=""
  if [[ -n "${COMPOSE_PROFILES:-}" ]]; then
    env_was_set="true"
  fi
  # Load existing .env if present
  if [[ -f "$DIR/.env" ]]; then
    source "$DIR/.env"
  fi
  # Stop conflicting sandbox containers before starting
  # Only one sandbox can run at a time (they provide the same service endpoint)
  # Silently remove any sandbox containers that might conflict with the one we're about to start
  local current_profile="${COMPOSE_PROFILES:-}"
  if [[ "$current_profile" == *"code-software"* ]]; then
    # Starting software sandbox - stop GPU sandboxes
    docker rm -f helix-sandbox-nvidia-1 helix-sandbox-amd-intel-1 helix-sandbox-macos-1 2>/dev/null || true
  elif [[ "$current_profile" == *"code-amd-intel"* ]]; then
    # Starting AMD/Intel sandbox - stop other sandboxes
    docker rm -f helix-sandbox-nvidia-1 helix-sandbox-software-1 helix-sandbox-macos-1 2>/dev/null || true
  elif [[ "$current_profile" == *"code-macos"* ]]; then
    # Starting macOS sandbox - stop other sandboxes
    docker rm -f helix-sandbox-nvidia-1 helix-sandbox-amd-intel-1 helix-sandbox-software-1 2>/dev/null || true
  else
    # Starting NVIDIA sandbox (default) - stop other sandboxes
    docker rm -f helix-sandbox-software-1 helix-sandbox-amd-intel-1 helix-sandbox-macos-1 2>/dev/null || true
  fi
  # If COMPOSE_PROFILES was set in environment before .env (even to empty), respect it
  # NOTE(review): env_was_set above is only true for a NON-empty environment
  # value, so an explicitly empty environment value actually falls through —
  # the "(even to empty)" wording looks inaccurate; confirm intent.
  if [[ "$env_was_set" == "true" ]]; then
    echo "🎮 Using COMPOSE_PROFILES from environment: '${COMPOSE_PROFILES:-<empty>}'"
    get_sandbox_names
    return
  fi
  # If COMPOSE_PROFILES is explicitly set in .env (even to empty), respect it
  if [[ -f "$DIR/.env" ]] && grep -q "^COMPOSE_PROFILES=" "$DIR/.env"; then
    echo "🎮 Using COMPOSE_PROFILES from .env: '${COMPOSE_PROFILES:-<empty>}'"
    get_sandbox_names
    return
  fi
  # Auto-detect GPU type
  local gpu_profile=""
  # Check for NVIDIA GPU first
  if command -v nvidia-smi &> /dev/null && nvidia-smi &> /dev/null; then
    gpu_profile="code-nvidia"
    echo "🎮 NVIDIA GPU detected, using 'code-nvidia' sandbox profile"
  # Check for AMD GPU (ROCm)
  elif [[ -e "/dev/kfd" ]] && [[ -d "/dev/dri" ]] && command -v lspci &> /dev/null && lspci | grep -iE "(VGA|3D|Display).*AMD" &> /dev/null; then
    gpu_profile="code-amd-intel"
    echo "🎮 AMD GPU detected (ROCm), using 'code-amd-intel' sandbox profile"
  # Check for Intel GPU (or generic /dev/dri)
  elif [[ -d "/dev/dri" ]] && [[ -n "$(ls -A /dev/dri 2>/dev/null)" ]]; then
    gpu_profile="code-amd-intel" # Intel uses same profile as AMD (no nvidia runtime)
    echo "🎮 Intel/Generic GPU detected, using 'code-amd-intel' sandbox profile"
  else
    echo "⚠️ No GPU detected, sandbox features may not work"
    get_sandbox_names
    return
  fi
  # Add to COMPOSE_PROFILES if we detected a GPU
  if [[ -n "$gpu_profile" ]]; then
    if [[ -n "${COMPOSE_PROFILES:-}" ]]; then
      export COMPOSE_PROFILES="${COMPOSE_PROFILES},${gpu_profile}"
    else
      export COMPOSE_PROFILES="$gpu_profile"
    fi
    echo "📋 COMPOSE_PROFILES set to: $COMPOSE_PROFILES"
  fi
  # Set sandbox service/container names based on profile
  get_sandbox_names
}
# Build the helix-runner binary and run it as a mock (fake GPU) runner
# pointed at the local dev API. Extra CLI args are passed straight through.
function mock-runner() {
  echo "🔨 Building helix-runner binary for mock runner..."
  build-runner || return 1
  echo "🚀 Starting mock runner..."
  # Fixed mock identity: pretends to be a 24GB "4090" runner on port 8090.
  local mock_args=(
    --mock-runner
    --server-port 8090
    --api-host http://localhost:8080
    --api-token oh-hallo-insecure-token
    --memory 24GB
    --runner-id mock
    --label gpu=4090
  )
  ./helix-runner "${mock_args[@]}" "$@"
}
# Build docker images for the dev stack.
# WITH_RUNNER=1: build only the detected runner profile, first ensuring the
# Zed checkout and binary exist (building Zed on demand).
# WITH_DEMOS=1:  build only the demos profile.
# Neither set:   build every service in docker-compose.dev.yaml.
function build() {
  # GPU detection exports RUNNER_CONTAINER / RUNNER_PROFILE.
  setup_runner_profile
  local compose_file="docker-compose.dev.yaml"
  if [[ -n "$WITH_RUNNER" ]]; then
    # The runner build embeds the Zed fork, expected next to this repo.
    if [[ ! -d "$PROJECTS_ROOT/zed" ]]; then
      echo "❌ ERROR: Zed source code not found at $PROJECTS_ROOT/zed/"
      echo ""
      echo "The Zed runner requires the Zed source code to be checked out alongside Helix."
      echo ""
      echo "Please run:"
      echo " cd .."
      echo " git clone https://github.com/helixml/zed.git"
      echo " cd helix"
      echo " WITH_RUNNER=1 ./stack build"
      exit 1
    fi
    # Build the Zed binary automatically when it is missing.
    if [[ ! -f "./zed-build/zed" ]]; then
      echo "🔨 Zed binary not found, building automatically..."
      build-zed || {
        echo "❌ Failed to build Zed. Please check the error messages above."
        echo "Note: Rust/Cargo is required to build Zed. Install from: https://rustup.rs/"
        exit 1
      }
    fi
    echo "🔨 Building runner: $RUNNER_CONTAINER"
    docker compose -f "$compose_file" --profile "$RUNNER_CONTAINER" build
    return
  fi
  if [[ -n "$WITH_DEMOS" ]]; then
    echo "🔨 Building demos"
    docker compose -f "$compose_file" --profile demos build
    return
  fi
  # No profiles specified: build everything.
  echo "🔨 Building all services"
  docker compose -f "$compose_file" build
}
# Statically compile the helix server binary into ./helix (CGO disabled,
# static extldflags) so it has no runtime libc dependency.
# NOTE: CGO_ENABLED=0 is exported, so it persists for any later go commands
# run in this same shell session.
function static-compile() {
  export CGO_ENABLED=0
  go build -ldflags '-extldflags "-static"' -o helix .
}
# Build the helix-runner binary into ./helix-runner (CGO enabled).
# APP_VERSION (default "v0.0.0+dev") is stamped into the binary via -X.
# Returns non-zero when the go build fails.
function build-runner() {
  echo "🔨 Building helix-runner binary..."
  export CGO_ENABLED=1
  local APP_VERSION=${APP_VERSION:-"v0.0.0+dev"}
  # NOTE(review): 'go build -tags' has no '!' negation syntax — '!rocm' is a
  # literal (never-satisfied) tag name. Kept as-is for compatibility, but
  # confirm the intended build constraint.
  # The whole -ldflags value is now quoted so APP_VERSION is not subject to
  # word splitting or globbing (it previously sat outside the quoted string).
  if go build -buildvcs=false -tags '!rocm' \
    -ldflags "-s -w -X github.com/helixml/helix/api/pkg/data.Version=${APP_VERSION}" \
    -o helix-runner ./runner-cmd/helix-runner; then
    echo "✅ Successfully built helix-runner binary"
  else
    echo "❌ Failed to build helix-runner binary"
    return 1
  fi
}
# Build the runner Docker image on top of a prebuilt runner-base image.
#   $1 - output image tag (default: "test")
#   $2 - runner-base image tag (default: "latest-empty")
# APP_VERSION is stamped from the current git commit when available.
function build-runner-image() {
  echo "🐳 Building runner Docker image..."
  local IMAGE_TAG="${1:-test}"
  local APP_VERSION=$(git rev-parse HEAD 2>/dev/null || echo "v0.0.0+dev")
  # Base image tag - default to latest-empty for fast test builds (skips ~16GB download)
  # Use 'latest-small' for production builds that need full base image
  local BASE_TAG="${2:-latest-empty}"
  echo " Output: registry.helixml.tech/helix/runner:$IMAGE_TAG"
  echo " Base: registry.helixml.tech/helix/runner-base:$BASE_TAG"
  echo " Version: $APP_VERSION"
  echo " Note: ROCm vLLM build takes ~10 minutes"
  echo ""
  docker buildx build \
    -f Dockerfile.runner \
    --build-arg TAG="$BASE_TAG" \
    --build-arg APP_VERSION="$APP_VERSION" \
    -t "registry.helixml.tech/helix/runner:$IMAGE_TAG" \
    .
}
function build-zed() {
  # ====================================================================
  # Build Zed inside Docker with BuildKit cache mounts
  # ====================================================================
  # Uses `docker build` with --mount=type=cache for persistent caching of:
  # - Cargo registry and git deps
  # - Rustup toolchain
  # - Build artifacts (incremental compilation)
  #
  # The binary is extracted via --output without keeping a large image.
  #
  # Usage: build-zed [dev|release]   (default: dev)
  # ====================================================================
  echo "🔨 Building Zed with External WebSocket Thread Sync (Docker build)..."
  local ZED_SOURCE_DIR="$PROJECTS_ROOT/zed"
  local ZED_OUTPUT_DIR="./zed-build"
  local BUILD_TYPE="${1:-dev}"
  # Validate build type
  if [[ "$BUILD_TYPE" != "dev" && "$BUILD_TYPE" != "release" ]]; then
    echo "❌ Error: BUILD_TYPE must be 'dev' or 'release'"
    echo "Usage: ./stack build-zed [dev|release]"
    echo ""
    echo "Build types:"
    echo " dev - Fast incremental builds with debug symbols (default)"
    echo " release - Optimized production builds (slower)"
    return 1
  fi
  # Check if Zed source directory exists
  if [ ! -d "$ZED_SOURCE_DIR" ]; then
    echo "❌ Zed source directory not found at: $ZED_SOURCE_DIR"
    echo "Expected directory structure:"
    echo " helix/ (current directory)"
    echo " zed/ (Zed fork with external_websocket_sync)"
    return 1
  fi
  # Check if external_websocket_sync exists in Zed source
  if [ ! -d "$ZED_SOURCE_DIR/crates/external_websocket_sync" ]; then
    echo "❌ external_websocket_sync crate not found in Zed source"
    echo "Make sure you're using the Zed fork with External WebSocket Thread Sync"
    return 1
  fi
  # Create output directory and ensure we own it
  mkdir -p "$ZED_OUTPUT_DIR"
  if [ ! -w "$ZED_OUTPUT_DIR" ]; then
    echo "⚠️ $ZED_OUTPUT_DIR exists but is not writable"
    echo " Fixing permissions..."
    sudo chown -R "$USER:$USER" "$ZED_OUTPUT_DIR"
  fi
  echo "🐳 Building Zed via docker build with BuildKit cache mounts..."
  echo " Source: $ZED_SOURCE_DIR"
  echo " Output: $ZED_OUTPUT_DIR"
  echo " Mode: $BUILD_TYPE"
  # Create temporary .dockerignore to exclude build artifacts from context
  # Without this, Docker would try to send 200GB+ of target/ as build context
  local DOCKERIGNORE="$ZED_SOURCE_DIR/.dockerignore"
  local DOCKERIGNORE_EXISTED=false
  if [ -f "$DOCKERIGNORE" ]; then
    DOCKERIGNORE_EXISTED=true
  fi
  cat > "$DOCKERIGNORE" << 'EIGNORE'
target/
target-ubuntu25*/
.git/
EIGNORE
  echo "🚀 Starting Docker build (cached after first run)..."
  # Build using Dockerfile.zed-build with the Zed source as context
  # --output extracts just the binary from the scratch final stage
  # --build-arg passes the build type (dev or release)
  # BuildKit cache mounts in the Dockerfile handle cargo/rustup/target caching
  #
  # FIX: capture the build status with '|| BUILD_EXIT=$?' instead of reading
  # $? on the following line. Under 'set -e' a failed build used to abort the
  # whole script right here, so the temporary .dockerignore was never removed
  # and the failure branch below was dead code.
  local BUILD_EXIT=0
  docker buildx build \
    --provenance=false \
    -f "$DIR/Dockerfile.zed-build" \
    --build-arg "BUILD_TYPE=$BUILD_TYPE" \
    --output "type=local,dest=$ZED_OUTPUT_DIR" \
    "$ZED_SOURCE_DIR" || BUILD_EXIT=$?
  # Clean up temporary .dockerignore (unless it existed before)
  if [ "$DOCKERIGNORE_EXISTED" = "false" ]; then
    rm -f "$DOCKERIGNORE"
  fi
  if [ "$BUILD_EXIT" -ne 0 ]; then
    echo "❌ Docker build failed"
    return 1
  fi
  # Verify the binary (--output writes it as ./zed-build/zed)
  if [ ! -f "$ZED_OUTPUT_DIR/zed" ]; then
    echo "❌ Docker build succeeded but $ZED_OUTPUT_DIR/zed was not produced"
    return 1
  fi
  # Declaration split from assignment so a du/cut failure is not masked.
  local BINARY_SIZE
  BINARY_SIZE=$(du -h "$ZED_OUTPUT_DIR/zed" | cut -f1)
  echo "✅ Zed binary built successfully"
  echo "📦 Binary size: $BINARY_SIZE"
  if [ "$BUILD_TYPE" = "release" ]; then
    echo "✅ Binary built with symbols stripped (via linker flags)"
  fi
  # Create test configuration
  cat > "$ZED_OUTPUT_DIR/test-settings.json" << EOF
{
  "external_websocket_sync": {
    "enabled": true,
    "server": {
      "enabled": true,
      "host": "127.0.0.1",
      "port": 3030
    },
    "websocket_sync": {
      "enabled": true,
      "external_url": "localhost:8080",
      "use_tls": false,
      "auto_reconnect": true
    }
  }
}
EOF
  echo "✅ Created test configuration: $ZED_OUTPUT_DIR/test-settings.json"
  # Copy Zed app icon for GNOME desktop integration
  local ICON_SOURCE="$ZED_SOURCE_DIR/crates/zed/resources/app-icon-dev.png"
  if [ -f "$ICON_SOURCE" ]; then
    cp "$ICON_SOURCE" "$ZED_OUTPUT_DIR/app-icon.png"
    echo "✅ Copied Zed app icon to $ZED_OUTPUT_DIR/app-icon.png"
  else
    echo "⚠️ Zed app icon not found at $ICON_SOURCE"
  fi
  echo "🎉 Zed build completed successfully!"
  echo ""
  echo "Next steps:"
  echo " 1. Test the binary: cd $ZED_OUTPUT_DIR && ./zed --version"
  echo " 2. Build Sway container with Zed: ./stack build-sway"
  echo " 3. Start services: ./stack start"
}
# _start_services: common startup logic shared by start and start-tmux
# Steps:
#   1. verify the Zed checkout and binary (building Zed on demand)
#   2. rebuild the frontend when .env sets FRONTEND_URL=/www
#   3. configure host networking, then detect runner + sandbox profiles
#   4. bring up docker compose with the requested profiles
#   5. wait (up to 60s) for postgres, then optionally wipe runner slots
function _start_services() {
  # Check for Zed dependency and build if needed
  if [ ! -d "$PROJECTS_ROOT/zed" ]; then
    echo "❌ ERROR: Zed source code not found at $PROJECTS_ROOT/zed/"
    echo ""
    echo "The Zed runner requires the Zed source code to be checked out alongside Helix."
    echo ""
    echo "Please run:"
    echo " cd $PROJECTS_ROOT"
    echo " git clone https://github.com/helixml/zed.git"
    echo " cd helix"
    echo " ./stack start"
    exit 1
  fi
  if [ ! -f "./zed-build/zed" ]; then
    echo "🔨 Zed binary not found, building automatically..."
    build-zed || {
      echo "❌ Failed to build Zed. Please check the error messages above."
      exit 1
    }
  fi
  # Always rebuild frontend when FRONTEND_URL=/www is set (production frontend mode)
  # This ensures ./stack start always has the latest frontend build
  if grep -q "^FRONTEND_URL=/www" .env 2>/dev/null; then
    echo "🔨 FRONTEND_URL=/www set, rebuilding frontend..."
    build-frontend || {
      echo "❌ Failed to build frontend. Please check the error messages above."
      exit 1
    }
  fi
  export MANUALRUN=1
  export LOG_LEVEL=debug
  # Configure host networking for Docker-in-Docker
  setup_dev_networking
  echo "🐳 Starting docker compose"
  # Setup runner profiles first
  setup_runner_profile
  # Setup sandbox profile based on GPU detection
  setup_sandbox_profile
  # Stop sandbox if running to ensure fresh start
  docker compose -f docker-compose.dev.yaml stop "$SANDBOX_SERVICE" 2>/dev/null || true
  # Start services based on enabled profiles
  if [[ -n "$WITH_RUNNER" ]]; then
    if [[ -n "$WITH_DEMOS" ]]; then
      # Both runner and demos
      echo "🚀 Starting services with runner ($RUNNER_CONTAINER) and demos profiles"
      docker compose -f docker-compose.dev.yaml --profile "$RUNNER_CONTAINER" --profile demos up -d
    else
      # Just runner
      echo "🚀 Starting services with runner ($RUNNER_CONTAINER) profile"
      docker compose -f docker-compose.dev.yaml --profile "$RUNNER_CONTAINER" up -d
    fi
  elif [[ -n "$WITH_DEMOS" ]]; then
    # Just demos
    echo "🚀 Starting services with demos profile"
    docker compose -f docker-compose.dev.yaml --profile demos up -d
  else
    # No special profiles
    echo "🚀 Starting base services"
    docker compose -f docker-compose.dev.yaml up -d
  fi
  sleep 2
  # Wait for postgres to be ready before trying to wipe slots
  echo "⏳ Waiting for postgres to be ready..."
  local timeout=60
  while ! docker compose -f docker-compose.dev.yaml exec postgres pg_isready -h localhost -p 5432 >/dev/null 2>&1; do
    timeout=$((timeout - 1))
    if [[ $timeout -eq 0 ]]; then
      echo "⚠️ Warning: Postgres not ready after 60 seconds, continuing anyway"
      break
    fi
    echo "⏳ Waiting for postgres... ($timeout seconds remaining)"
    sleep 1
  done
  # Check if WIPE_SLOTS is set and wipe slots if requested
  # NOTE(review): WIPE_SLOTS defaults to "0" at the top of this script, and
  # "0" is non-empty — so this branch appears to run (and wipe slots) on
  # every start unless WIPE_SLOTS is explicitly set to "". Confirm whether
  # the default should be "" or this test should be == "1".
  if [[ -n "$WIPE_SLOTS" ]]; then
    echo "🧹 WIPE_SLOTS is set, wiping slots from database..."
    if ! wipe-slots; then
      echo "⚠️ Warning: Failed to wipe slots, but continuing startup..."
    fi
  fi
}
# Start the dev stack and attach to a tmux dashboard: a 3x2 grid of log
# panes plus a full-width "hacking terminal" at the bottom. If the session
# already exists we simply attach to it. Pane contents depend on
# WITH_RUNNER / WITH_DEMOS and on the 'kodit' compose profile.
function start-tmux() {
  if tmux has-session -t "$TMUX_SESSION" 2>/dev/null; then
    echo "📺 Session $TMUX_SESSION already exists. Attaching..."
    sleep 1
    tmux -2 attach -t $TMUX_SESSION
    exit 0;
  fi
  _start_services
  echo "📺 Creating tmux session $TMUX_SESSION with 3x2 grid layout + full-width hacking terminal..."
  tmux -2 new-session -d -s "$TMUX_SESSION"
  # Create a 3x2 grid layout with full-width hacking terminal at bottom
  # IMPORTANT: All tmux commands must explicitly target $TMUX_SESSION to avoid
  # clobbering the user's current tmux session if they're running inside one
  # First create top and middle rows for logs
  tmux split-window -t "$TMUX_SESSION" -v -d
  tmux split-window -t "$TMUX_SESSION" -v -d
  # Split the top row into 3 columns (Frontend, API, Haystack)
  tmux select-pane -t "$TMUX_SESSION:0.0"
  tmux split-window -t "$TMUX_SESSION" -h -d
  tmux select-pane -t "$TMUX_SESSION:0.1"
  tmux split-window -t "$TMUX_SESSION" -h -d
  # Split the middle row into 3 columns (Zed Agent, Zed Process, GPU Runner)
  tmux select-pane -t "$TMUX_SESSION:0.3"
  tmux split-window -t "$TMUX_SESSION" -h -d
  tmux select-pane -t "$TMUX_SESSION:0.4"
  tmux split-window -t "$TMUX_SESSION" -h -d
  # Bottom pane (6) stays full-width for hacking terminal
  # Set pane titles and start processes in 3x2 + full-width layout
  # Top row (0-2): Frontend, API, Haystack
  tmux select-pane -t "$TMUX_SESSION:0.0" -T "Frontend Logs"
  tmux send-keys -t "$TMUX_SESSION:0.0" 'docker compose -f docker-compose.dev.yaml logs -f frontend' C-m
  tmux select-pane -t "$TMUX_SESSION:0.1" -T "API Logs"
  tmux send-keys -t "$TMUX_SESSION:0.1" 'docker compose -f docker-compose.dev.yaml logs -f api' C-m
  tmux select-pane -t "$TMUX_SESSION:0.2" -T "Haystack Logs"
  tmux send-keys -t "$TMUX_SESSION:0.2" 'docker compose -f docker-compose.dev.yaml logs -f haystack' C-m
  # Determine sandbox logs command based on Helix-in-Helix mode
  local SANDBOX_LOGS_CMD
  SANDBOX_LOGS_CMD="docker compose -f docker-compose.dev.yaml logs -f $SANDBOX_SERVICE"
  # Middle row (3-5): Context-aware based on WITH_RUNNER
  if [[ -n "$WITH_RUNNER" ]]; then
    # WITH_RUNNER mode: Sandbox logs in pane 3
    tmux select-pane -t "$TMUX_SESSION:0.3" -T "Sandbox Logs"
    tmux send-keys -t "$TMUX_SESSION:0.3" "$SANDBOX_LOGS_CMD" C-m
    # Pane 4: Kodit logs if kodit profile enabled, otherwise hacking terminal
    if [[ "${COMPOSE_PROFILES:-}" == *"kodit"* ]]; then
      tmux select-pane -t "$TMUX_SESSION:0.4" -T "Kodit Logs"
      tmux send-keys -t "$TMUX_SESSION:0.4" 'docker compose -f docker-compose.dev.yaml logs -f kodit' C-m
    else
      tmux select-pane -t "$TMUX_SESSION:0.4" -T "🔨 HACKING TERMINAL"
      tmux send-keys -t "$TMUX_SESSION:0.4" 'echo "🔨 Hacking terminal ready!" && echo "💡 Tip: Use this for development, debugging, and building"' C-m
    fi
    # GPU runner logs with air hot reloading
    tmux select-pane -t "$TMUX_SESSION:0.5" -T "GPU Runner ($RUNNER_CONTAINER)"
    tmux send-keys -t "$TMUX_SESSION:0.5" 'echo "Monitoring GPU Runner logs (with air hot reloading)..." && sleep 3 && docker compose -f docker-compose.dev.yaml --profile '"$RUNNER_CONTAINER"' logs -f '"$RUNNER_CONTAINER" C-m
  else
    # WITHOUT_RUNNER mode: Sandbox logs
    tmux select-pane -t "$TMUX_SESSION:0.3" -T "Sandbox Logs"
    tmux send-keys -t "$TMUX_SESSION:0.3" "$SANDBOX_LOGS_CMD" C-m
    # Pane 4: Kodit logs if kodit profile enabled, otherwise hacking terminal
    if [[ "${COMPOSE_PROFILES:-}" == *"kodit"* ]]; then
      tmux select-pane -t "$TMUX_SESSION:0.4" -T "Kodit Logs"
      tmux send-keys -t "$TMUX_SESSION:0.4" 'docker compose -f docker-compose.dev.yaml logs -f kodit' C-m
    else
      tmux select-pane -t "$TMUX_SESSION:0.4" -T "🔨 HACKING TERMINAL"
      tmux send-keys -t "$TMUX_SESSION:0.4" 'echo "🔨 Hacking terminal ready!" && echo "💡 Tip: Use this for development, debugging, and building"' C-m
    fi
    # Middle right pane (5) - contextual based on demos
    if [[ -n "$WITH_DEMOS" ]]; then
      # Demos interactive session
      tmux select-pane -t "$TMUX_SESSION:0.5" -T "Demos"
      tmux send-keys -t "$TMUX_SESSION:0.5" 'docker compose -f docker-compose.dev.yaml --profile demos exec demos bash' C-m
    else
      # Hacking terminal
      tmux select-pane -t "$TMUX_SESSION:0.5" -T "🔨 HACKING TERMINAL"
      tmux send-keys -t "$TMUX_SESSION:0.5" 'echo "🔨 Hacking terminal ready!" && echo "💡 Tip: Use this for development, debugging, and building"' C-m
    fi
  fi
  # Bottom full-width pane (6) - HACKING TERMINAL! 🔨
  tmux select-pane -t "$TMUX_SESSION:0.6" -T "🔨 HACKING TERMINAL"
  tmux send-keys -t "$TMUX_SESSION:0.6" 'echo "🔨 Full-width hacking terminal ready!" && echo "💡 Tip: Use this for development, debugging, and building"' C-m
  if [[ -n "$WITH_DEMOS" && -n "$WITH_RUNNER" ]]; then
    echo "Note: Both GPU runner and demos enabled - demos available in background. Run manually with: docker compose -f docker-compose.dev.yaml --profile demos exec demos bash"
  fi
  # Enable pane titles display
  tmux set-option -t "$TMUX_SESSION" pane-border-status top
  tmux set-option -t "$TMUX_SESSION" pane-border-format "#{pane_index}: #{pane_title}"
  # Make all panes equal size
  tmux select-layout -t "$TMUX_SESSION" even-horizontal
  tmux select-layout -t "$TMUX_SESSION" tiled
  tmux -2 attach-session -t $TMUX_SESSION
}
# Start the dev stack non-interactively, then print a short status summary
# and the most useful follow-up commands.
function start() {
  _start_services
  printf '%s\n' "" "✅ Stack started"
  # Prefer the table format; older compose versions fall back to plain ps.
  docker compose -f docker-compose.dev.yaml ps --format "table {{.Name}}\t{{.Status}}" 2>/dev/null \
    || docker compose -f docker-compose.dev.yaml ps
  printf '%s\n' "" \
    "View logs: docker compose -f docker-compose.dev.yaml logs -f api" \
    "Stop: ./stack stop" \
    "Tmux UI: ./stack start-tmux"
}
# Stop the dev stack: docker containers plus the tmux session.
# By default postgres and pgvector are excluded (kept running) to preserve
# local data; set STOP_POSTGRES=1 / STOP_PGVECTOR=1 to stop them too.
# WITH_RUNNER / WITH_DEMOS narrow which compose profiles are torn down.
function stop() {
  echo "🛑 Stopping docker containers and tmux session..."
  # Build exclude pattern for services that should not be stopped
  local exclude_services=()
  [[ -z "$STOP_POSTGRES" ]] && exclude_services+=("postgres")
  [[ -z "$STOP_PGVECTOR" ]] && exclude_services+=("pgvector")
  # Setup runner profiles first
  setup_runner_profile
  if [[ ${#exclude_services[@]} -eq 0 ]]; then
    echo "🗑️ Removing all docker containers"
    # Stop containers based on enabled profiles
    if [[ -n "$WITH_RUNNER" ]]; then
      if [[ -n "$WITH_DEMOS" ]]; then
        # Both runner and demos
        echo "🔄 Stopping services with runner ($RUNNER_CONTAINER) and demos profiles"
        docker compose -f docker-compose.dev.yaml --profile "$RUNNER_CONTAINER" --profile demos down -t 1 || echo "⚠️ Some services may not exist"
      else
        # Just runner
        echo "🔄 Stopping services with runner ($RUNNER_CONTAINER) profile"
        docker compose -f docker-compose.dev.yaml --profile "$RUNNER_CONTAINER" down -t 1 || echo "⚠️ Some services may not exist"
      fi
    elif [[ -n "$WITH_DEMOS" ]]; then
      # Just demos
      echo "🔄 Stopping services with demos profile"
      docker compose -f docker-compose.dev.yaml --profile demos down -t 1 || echo "⚠️ Some services may not exist"
    else
      # Include all profiles when no environment variables are set.
      # FIX: runner_gpu_amd added — setup_runner_profile can start the stack
      # under that profile on AMD/ROCm hosts, and it was previously omitted
      # here, leaving the AMD runner container running after './stack stop'.
      echo "🔄 Stopping all services (all profiles)"
      docker compose -f docker-compose.dev.yaml --profile runner --profile runner_gpu --profile runner_gpu_amd --profile demos down -t 1 || echo "⚠️ Some services may not exist"
    fi
  else
    # Create exclude list for display and grep pattern
    local exclude_list=$(IFS=', '; echo "${exclude_services[*]}")
    local exclude_pattern=$(IFS='|'; echo "${exclude_services[*]}")
    echo "🗑️ Removing docker containers (except: $exclude_list)"
    # Get list of services to stop (excluding the ones we want to keep)
    local services
    if [[ -n "$WITH_RUNNER" ]]; then
      if [[ -n "$WITH_DEMOS" ]]; then
        echo "🔄 Stopping services with runner ($RUNNER_CONTAINER) and demos profiles (except: $exclude_list)"
        services=$(docker compose -f docker-compose.dev.yaml --profile "$RUNNER_CONTAINER" --profile demos config --services 2>/dev/null | grep -v -E "$exclude_pattern" || true)
      else
        echo "🔄 Stopping services with runner ($RUNNER_CONTAINER) profile (except: $exclude_list)"
        services=$(docker compose -f docker-compose.dev.yaml --profile "$RUNNER_CONTAINER" config --services 2>/dev/null | grep -v -E "$exclude_pattern" || true)
      fi
    elif [[ -n "$WITH_DEMOS" ]]; then
      echo "🔄 Stopping services with demos profile (except: $exclude_list)"
      services=$(docker compose -f docker-compose.dev.yaml --profile demos config --services 2>/dev/null | grep -v -E "$exclude_pattern" || true)
    else
      # FIX: runner_gpu_amd added here as well (see comment above).
      echo "🔄 Stopping all services (all profiles, except: $exclude_list)"
      services=$(docker compose -f docker-compose.dev.yaml --profile runner --profile runner_gpu --profile runner_gpu_amd --profile demos config --services 2>/dev/null | grep -v -E "$exclude_pattern" || true)
    fi
    # Stop only the non-excluded services
    if [[ -n "$services" ]]; then
      echo "🗑️ Going to remove containers: $(echo $services | tr '\n' ' ')"
      # Stop and remove containers using a while loop to avoid xargs command line length issues
      while IFS= read -r service; do
        if [[ -n "$service" ]]; then
          echo "🗑️ Stopping and removing: $service"
          docker compose -f docker-compose.dev.yaml stop "$service" 2>/dev/null && \
            docker compose -f docker-compose.dev.yaml rm -f "$service" 2>/dev/null || \
            echo "⚠️ Could not stop/remove $service"
        fi
      done <<< "$services"
    else
      echo "✨ No services to stop (all are excluded)"
    fi
  fi
  echo "📺 Stopping tmux session $TMUX_SESSION..."
  if tmux has-session -t "$TMUX_SESSION" 2>/dev/null; then
    tmux kill-session -t "$TMUX_SESSION" || echo "⚠️ Failed to kill tmux session, but continuing..."
  else
    echo "📺 Tmux session $TMUX_SESSION not found"
  fi
  echo "✨ Stop completed successfully!"
}
function up() {
  # Bring up the dev stack in detached mode.
  # Extra arguments are forwarded to `docker compose up` (e.g. service names).
  # Setup sandbox profile based on GPU detection (if not already in .env)
  setup_sandbox_profile
  # Sandbox services are enabled via COMPOSE_PROFILES in .env or auto-detected above
  # Profile 'code-nvidia' = NVIDIA GPU, 'code-amd-intel' = AMD/Intel GPU
  # Quote "$@" so each caller argument is passed through as one word;
  # unquoted $@ would re-split arguments containing spaces.
  docker compose -f docker-compose.dev.yaml up -d "$@"
}
function build-zed-agent() {
  # Build the Zed agent Docker image (tagged helix-sway:latest) from the
  # prebuilt Zed binary in ./zed-build.
  # Returns: 1 if the binary is missing or the docker build fails, 0 on success.
  echo "🔨 Building Zed agent Docker image..."
  # Build the Docker image using the Zed binary we built
  if [ ! -f "./zed-build/zed" ]; then
    echo "❌ Zed binary not found. Run './stack build-zed' first."
    return 1
  fi
  # Test the build command directly instead of inspecting $? afterwards —
  # checking $? after the fact is fragile (any intervening command clobbers it).
  if docker buildx build -t helix-sway:latest -f Dockerfile.sway-helix .; then
    echo "✅ Zed agent Docker image built successfully"
  else
    echo "❌ Failed to build Zed agent Docker image"
    return 1
  fi
}
function zed-agent-up() {
  # Bring up the Zed agent compose stack, building the Zed binary and the
  # agent image on demand before starting the services.
  local compose_file="docker-compose.zed-agent.yaml"
  echo "Starting Zed agent services..."
  # Make sure the Zed binary exists before anything else.
  if ! [ -f "./zed-build/zed" ]; then
    echo "Zed binary not found, building first..."
    build-zed || return 1
  fi
  # NOTE(review): this checks for 'helix/zed-agent:latest' while
  # build-zed-agent tags its image 'helix-sway:latest' — confirm which tag
  # the compose file actually uses.
  if ! docker image inspect helix/zed-agent:latest > /dev/null 2>&1; then
    echo "Zed agent image not found, building first..."
    build-zed-agent || return 1
  fi
  docker compose -f "$compose_file" up -d
  echo "✅ Zed agent services started"
  echo "📋 Services running:"
  echo " - Helix API: http://localhost:8080"
  echo " - Zed HTTP API: http://localhost:3030"
  echo " - VNC Web Client: http://localhost:6080"
  echo ""
  echo "🧪 Test commands:"
  echo " curl http://localhost:8080/health # Helix API"
  echo " curl http://localhost:3030/health # Zed integration API"
}
function zed-agent-down() {
  # Tear down the Zed agent compose stack.
  local compose_file="docker-compose.zed-agent.yaml"
  echo "Stopping Zed agent services..."
  docker compose -f "$compose_file" down
}
function zed-agent-logs() {
  # Follow logs for one Zed agent service ($1), defaulting to the runner.
  local service="${1:-zed-agent-runner}"
  docker compose -f docker-compose.zed-agent.yaml logs -f "$service"
}
function rebuild() {
  # Rebuild images and restart the dev stack in detached mode; extra
  # arguments (e.g. service names) are forwarded to `docker compose up`.
  # Quote "$@" so arguments are passed through without re-splitting.
  docker compose -f docker-compose.dev.yaml up -d --build "$@"
}
# Helper function to build image tags string (commit hash + git tag if available)
function get_image_tags() {
  # Build a "-t image:tag" docker argument string on stdout: always tags with
  # the short commit hash, plus the git tag when HEAD sits exactly on one.
  # Arguments: $1 - image base name (e.g. registry/repo)
  # Outputs:   tag string on stdout (no trailing newline); tag notice on stderr
  local OLD_IFS=$IFS
  IFS=' ' # Temporarily use space as IFS for proper word splitting
  local IMAGE_BASE=$1
  # Split declaration from assignment: `local VAR=$(cmd)` always returns 0,
  # which would mask a git failure (e.g. not in a repo) and silently produce
  # an empty tag under the script's errexit.
  local COMMIT_HASH GIT_TAG
  COMMIT_HASH=$(git rev-parse --short HEAD)
  GIT_TAG=$(git describe --exact-match --tags HEAD 2>/dev/null || echo "")
  local TAG_STRING="-t ${IMAGE_BASE}:${COMMIT_HASH}"
  if [ -n "$GIT_TAG" ]; then
    TAG_STRING="${TAG_STRING} -t ${IMAGE_BASE}:${GIT_TAG}"
    echo "🏷️ Git tag detected: ${GIT_TAG}" >&2
  fi
  printf "%s" "${TAG_STRING}" # Use printf to avoid trailing newline issues
  IFS=$OLD_IFS
}
function build-qwen-code() {
  # ====================================================================
  # Build Qwen Code with Docker BuildKit layer caching
  # ====================================================================
  # Uses `docker build` with --mount=type=cache for persistent caching of
  # npm download cache. Package manifests are copied first so npm ci is
  # a cached layer when only source files change.
  #
  # The build artifacts are extracted via --output without keeping a large image.
  # ====================================================================
  # Reads globals: $PROJECTS_ROOT (parent dir holding the qwen-code checkout)
  # and $DIR (location of Dockerfile.qwen-code-build) — both presumably set
  # earlier in this script; TODO confirm.
  # Skips the build entirely when the output dir already matches the current
  # qwen-code git commit. Returns 1 on missing source or failed docker build.
  echo "📦 Building Qwen Code (Docker build with BuildKit cache)..."
  local QWEN_SOURCE_DIR="$PROJECTS_ROOT/qwen-code"
  local QWEN_OUTPUT_DIR="./qwen-code-build"
  # Check if qwen-code source directory exists
  if [ ! -d "$QWEN_SOURCE_DIR" ]; then
    echo "❌ qwen-code source directory not found at: $QWEN_SOURCE_DIR"
    echo "Clone it next to helix with: cd $PROJECTS_ROOT && git clone git@github.com:helixml/qwen-code.git"
    return 1
  fi
  # Get current qwen-code git commit hash
  local QWEN_CODE_HASH=$(cd "$QWEN_SOURCE_DIR" && git rev-parse HEAD)
  # Hash of the last successful build, persisted in the output dir.
  local SAVED_HASH=""
  if [ -f "$QWEN_OUTPUT_DIR/.git-commit-hash" ]; then
    SAVED_HASH=$(cat "$QWEN_OUTPUT_DIR/.git-commit-hash")
  fi
  # Check if we need to rebuild: missing/incomplete output, missing bundle,
  # or source commit changed since the last build.
  local NEEDS_REBUILD=false
  if [ ! -d "$QWEN_OUTPUT_DIR" ] || [ ! -f "$QWEN_OUTPUT_DIR/package.json" ]; then
    NEEDS_REBUILD=true
    echo "📦 qwen-code-build directory missing or incomplete"
  elif [ ! -d "$QWEN_OUTPUT_DIR/dist" ]; then
    NEEDS_REBUILD=true
    echo "📦 qwen-code dist directory missing (bundle not built)"
  elif [ "$QWEN_CODE_HASH" != "$SAVED_HASH" ]; then
    NEEDS_REBUILD=true
    echo "📦 qwen-code changed: ${SAVED_HASH:0:8} -> ${QWEN_CODE_HASH:0:8}"
  fi
  if [ "$NEEDS_REBUILD" != "true" ]; then
    echo "✅ Using existing qwen-code build at $QWEN_OUTPUT_DIR (${SAVED_HASH:0:8})"
    return 0
  fi
  echo "🐳 Building qwen-code via docker build with BuildKit cache..."
  echo " Source: $QWEN_SOURCE_DIR"
  echo " Output: $QWEN_OUTPUT_DIR"
  # Create temporary .dockerignore to exclude large dirs from build context.
  # Remember whether one already existed so we only delete our own afterwards.
  local DOCKERIGNORE="$QWEN_SOURCE_DIR/.dockerignore"
  local DOCKERIGNORE_EXISTED=false
  if [ -f "$DOCKERIGNORE" ]; then
    DOCKERIGNORE_EXISTED=true
  fi
  cat > "$DOCKERIGNORE" << 'EIGNORE'
node_modules/
.git/
dist/
EIGNORE
  # Clean output dir before extraction (old builds may have root-owned files);
  # fall back to sudo when the plain rm fails on root-owned leftovers.
  rm -rf "$QWEN_OUTPUT_DIR" 2>/dev/null || sudo rm -rf "$QWEN_OUTPUT_DIR"
  mkdir -p "$QWEN_OUTPUT_DIR"
  echo "🚀 Starting Docker build..."
  # Build using Dockerfile.qwen-code-build with qwen-code source as context.
  # --output extracts just the build artifacts from the scratch final stage.
  # BuildKit cache mounts in the Dockerfile handle npm download caching.
  # Use type=tar piped through tar to avoid lchown permission errors
  # (BuildKit tries to preserve root ownership from container).
  # Temporarily disable errexit so .dockerignore cleanup runs on failure.
  set +e
  docker buildx build \
    --provenance=false \
    -f "$DIR/Dockerfile.qwen-code-build" \
    --output "type=tar,dest=-" \
    "$QWEN_SOURCE_DIR" | tar xf - -C "$QWEN_OUTPUT_DIR"
  # PIPESTATUS[0] is the docker build's exit code (tar's status alone would
  # hide a failed build); capture it before any other command clobbers it.
  local BUILD_EXIT=${PIPESTATUS[0]}
  set -e
  # Clean up temporary .dockerignore (unless it existed before)
  if [ "$DOCKERIGNORE_EXISTED" = "false" ]; then
    rm -f "$DOCKERIGNORE"
  fi
  if [ $BUILD_EXIT -ne 0 ]; then
    echo "❌ Docker build failed"
    return 1
  fi
  # Save the git commit hash for future change detection
  echo "$QWEN_CODE_HASH" > "$QWEN_OUTPUT_DIR/.git-commit-hash"
  echo "✅ qwen-code built successfully (${QWEN_CODE_HASH:0:8})"
  echo "📦 Output: $QWEN_OUTPUT_DIR"
}
function build-xfce() {
  # Build the custom helix-xfce:latest image (passwordless sudo, work-dir
  # permissions). Note: exits the whole script (not just the function) when
  # the build fails, matching the other desktop build commands.
  echo "🖥️ Building custom XFCE container with passwordless sudo..."
  echo "🔨 Building helix-xfce:latest container..."
  # Guard clause: bail out early on build failure, success path follows.
  if ! docker buildx build -f Dockerfile.xfce-helix -t helix-xfce:latest .; then
    echo "❌ Failed to build XFCE container"
    exit 1
  fi
  echo "✅ XFCE container built successfully"
  echo "🖥️ Custom XFCE image ready: helix-xfce:latest"
  echo ""
  echo "Features added:"
  echo " - Passwordless sudo for retro and user accounts"
  echo " - Proper work directory permissions"
}
# Generic desktop build function - builds any desktop (sway, zorin, ubuntu)
# Uses Docker image hashes for content-addressable versioning
function build-desktop() {
local DESKTOP_NAME="$1"
if [ -z "$DESKTOP_NAME" ]; then
echo "Usage: ./stack build-desktop <name>"
echo "Available: sway, zorin, ubuntu, xfce, kde, hyprland"
exit 1
fi
local DOCKERFILE="Dockerfile.${DESKTOP_NAME}-helix"
local IMAGE_NAME="helix-${DESKTOP_NAME}"
# Validate Dockerfile exists
if [ ! -f "$DOCKERFILE" ]; then
echo "❌ Dockerfile not found: $DOCKERFILE"
exit 1
fi
local DESKTOP_BUILD_START=$SECONDS
_dt_elapsed() { local msg="[⏱️ desktop-${DESKTOP_NAME} +$((SECONDS - DESKTOP_BUILD_START))s] $*"; echo "$msg"; [ -n "${BUILD_SANDBOX_TIMING_LOG:-}" ] && echo "$msg" >> "$BUILD_SANDBOX_TIMING_LOG" || true; }
_dt_elapsed "Starting build..."
# Build Zed if binary doesn't exist
if [ ! -f "./zed-build/zed" ]; then
echo "❌ Zed binary not found. Building in release mode first..."
if ! build-zed release; then
echo "❌ Failed to build Zed binary"
exit 1
fi
else