diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index fa828e41..71967054 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -511,6 +511,7 @@ jobs: run: | for root in \ src-tauri/resources/syftbox \ + src-tauri/resources/syqure \ src-tauri/resources/bundled/uv \ src-tauri/resources/bundled/java \ src-tauri/resources/bundled/nextflow; do @@ -526,12 +527,14 @@ jobs: run: | # Create placeholder directories and files for resources that tauri.conf.json expects mkdir -p src-tauri/resources/syftbox + mkdir -p src-tauri/resources/syqure mkdir -p src-tauri/resources/bundled/java mkdir -p src-tauri/resources/bundled/nextflow mkdir -p src-tauri/resources/bundled/uv # Create placeholder files touch src-tauri/resources/syftbox/syftbox + touch src-tauri/resources/syqure/syqure echo "Placeholder - bundled deps not included in arm64 build" > src-tauri/resources/bundled/README.txt touch src-tauri/resources/bundled/java/.placeholder touch src-tauri/resources/bundled/nextflow/.placeholder @@ -599,6 +602,68 @@ jobs: Pop-Location } + - name: Build syqure and bundle codon libs + if: matrix.build_kind == 'tauri' && runner.os != 'Windows' && matrix.target != 'linux-arm64' + shell: bash + run: | + chmod +x syqure/syqure_bins.sh + ./syqure/syqure_bins.sh + mkdir -p src-tauri/resources/syqure + if [[ -f syqure/target/debug/syqure ]]; then + cp syqure/target/debug/syqure src-tauri/resources/syqure/syqure + chmod +x src-tauri/resources/syqure/syqure + else + echo "❌ syqure binary not found at syqure/target/debug/syqure" >&2 + exit 1 + fi + # Bundle codon/sequre libs alongside syqure so dyld/$ORIGIN finds them + if [[ -d syqure/target/dist/syqure/lib/codon ]]; then + echo "Copying codon/sequre libs to resources/syqure/lib/codon..." + rm -rf src-tauri/resources/syqure/lib/codon + mkdir -p src-tauri/resources/syqure/lib/codon + cp -RL syqure/target/dist/syqure/lib/codon/. 
src-tauri/resources/syqure/lib/codon/ + fi + # Fix dylib paths on macOS: bundle transitive homebrew deps and rewrite absolute paths + if [[ "$(uname -s)" == "Darwin" ]]; then + CODON_LIB="src-tauri/resources/syqure/lib/codon" + # Bundle libzstd (transitive dep of libcodonrt/libcodonc) + ZSTD_LIB="" + if command -v brew >/dev/null 2>&1; then + ZSTD_PREFIX="$(brew --prefix zstd 2>/dev/null || true)" + if [[ -n "$ZSTD_PREFIX" && -f "$ZSTD_PREFIX/lib/libzstd.1.dylib" ]]; then + ZSTD_LIB="$ZSTD_PREFIX/lib/libzstd.1.dylib" + fi + fi + for candidate in /opt/homebrew/opt/zstd/lib/libzstd.1.dylib /usr/local/opt/zstd/lib/libzstd.1.dylib; do + if [[ -z "$ZSTD_LIB" && -f "$candidate" ]]; then + ZSTD_LIB="$candidate" + fi + done + if [[ -n "$ZSTD_LIB" ]]; then + echo "Bundling libzstd from $ZSTD_LIB" + cp -L "$ZSTD_LIB" "$CODON_LIB/libzstd.1.dylib" + chmod u+w "$CODON_LIB/libzstd.1.dylib" + else + echo "Warning: libzstd not found; codon dylibs may fail to load" >&2 + fi + # Rewrite absolute homebrew paths to @loader_path in all dylibs + echo "Rewriting dylib paths to @loader_path..." 
+ for dylib in "$CODON_LIB"/*.dylib; do + [[ -f "$dylib" ]] || continue + otool -L "$dylib" 2>/dev/null | awk '{print $1}' | { grep -E '/(opt|usr/local)/' || true; } | while read -r abs_path; do + lib_name="$(basename "$abs_path")" + echo " $(basename "$dylib"): $abs_path -> @loader_path/$lib_name" + install_name_tool -change "$abs_path" "@loader_path/$lib_name" "$dylib" + done + current_id="$(otool -D "$dylib" 2>/dev/null | tail -1)" + if [[ "$current_id" == /opt/* || "$current_id" == /usr/local/opt/* ]]; then + lib_name="$(basename "$current_id")" + echo " Fixing install name: $(basename "$dylib") -> @loader_path/$lib_name" + install_name_tool -id "@loader_path/$lib_name" "$dylib" + fi + done + fi + - name: Verify syftbox binary architecture + smoke run if: matrix.build_kind == 'tauri' && runner.os != 'Windows' shell: bash @@ -741,6 +806,21 @@ jobs: codesign --force --options runtime --timestamp --entitlements "$ENTITLEMENTS" --sign "$APPLE_SIGNING_IDENTITY" "$SYFTBOX_BIN" fi + # Sign syqure binary + SYQURE_BIN="src-tauri/resources/syqure/syqure" + if [[ -f "$SYQURE_BIN" ]]; then + echo "Signing syqure binary..." + codesign --force --options runtime --timestamp --sign "$APPLE_SIGNING_IDENTITY" "$SYQURE_BIN" + fi + + # Sign codon/sequre libs bundled with syqure + if [[ -d "src-tauri/resources/syqure/lib/codon" ]]; then + find src-tauri/resources/syqure/lib/codon -type f \( -name "*.dylib" -o -name "*.so" -o -perm +111 \) | while read -r bin; do + echo "Signing syqure codon lib: $bin" + codesign --force --options runtime --timestamp --sign "$APPLE_SIGNING_IDENTITY" "$bin" || true + done + fi + # Sign all uv binaries if [[ -d "src-tauri/resources/bundled/uv" ]]; then find src-tauri/resources/bundled/uv -type f -perm +111 | while read -r bin; do @@ -775,7 +855,8 @@ jobs: echo "Verifying bundled binaries (codesign only)..." # Per-file spctl is intentionally skipped; nested tools are validated by final app notarization. 
- for f in "$SYFTBOX_BIN" \ + for f in "$SYFTBOX_BIN" "$SYQURE_BIN" \ + $(find src-tauri/resources/syqure/lib/codon -type f \( -name "*.dylib" -o -name "*.so" -o -perm +111 \) 2>/dev/null) \ $(find src-tauri/resources/bundled/uv -type f -perm +111 2>/dev/null) \ $(find src-tauri/resources/bundled/nextflow -type f -perm +111 2>/dev/null) \ $(find src-tauri/resources/bundled/java/macos-aarch64 -type f \( -perm +111 -o -name \"*.dylib\" \) 2>/dev/null); do diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 282e9c61..a023cd38 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -46,42 +46,29 @@ jobs: fail-fast: false matrix: include: - # flows-pause-resume (linux only for next push) - - scenario: flows-pause-resume + # pipelines-multiparty-flow smoke test (linux) + - scenario: pipelines-multiparty-flow platform: linux-x86_64 runner: namespace-profile-linux-medium client_mode: rust tauri_binary: src-tauri/target/release/bv-desktop run_prefix: "xvfb-run -a" - # flows-collab (linux + windows) - - scenario: flows-collab + + # syqure-multiparty-flow secure-only smoke test (linux) + - scenario: syqure-multiparty-flow-secure-only platform: linux-x86_64 runner: namespace-profile-linux-medium client_mode: rust tauri_binary: src-tauri/target/release/bv-desktop run_prefix: "xvfb-run -a" - - scenario: flows-collab - platform: windows-x86_64 - runner: namespace-profile-windows-medium - client_mode: rust - tauri_binary: src-tauri/target/release/bv-desktop.exe - # jupyter-collab (linux + mac + windows) - - scenario: jupyter-collab + + # syqure-multiparty-allele-freq (linux) + - scenario: syqure-multiparty-allele-freq platform: linux-x86_64 - runner: namespace-profile-linux-medium + runner: namespace-profile-linux-large client_mode: rust tauri_binary: src-tauri/target/release/bv-desktop run_prefix: "xvfb-run -a" - - scenario: jupyter-collab - platform: macos-arm64 - runner: namespace-profile-mac-medium - client_mode: rust - tauri_binary: 
src-tauri/target/release/bv-desktop - - scenario: jupyter-collab - platform: windows-x86_64 - runner: namespace-profile-windows-medium - client_mode: rust - tauri_binary: src-tauri/target/release/bv-desktop.exe defaults: run: @@ -129,6 +116,9 @@ jobs: workspace-deps-${{ runner.os }}- - name: Setup workspace + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + BV_SKIP_SYQURE: ${{ contains(matrix.scenario, 'syqure') && '0' || '1' }} run: | chmod +x scripts/setup-workspace.sh ./scripts/setup-workspace.sh @@ -228,6 +218,15 @@ jobs: build-essential \ pkg-config + - name: Install syqure build dependencies (Linux) + if: runner.os == 'Linux' && contains(matrix.scenario, 'syqure') + run: | + UBUNTU_CODENAME=$(lsb_release -cs 2>/dev/null || echo "jammy") + wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | sudo tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc >/dev/null + echo "deb http://apt.llvm.org/${UBUNTU_CODENAME}/ llvm-toolchain-${UBUNTU_CODENAME}-17 main" | sudo tee /etc/apt/sources.list.d/llvm-17.list + sudo apt-get update + sudo apt-get install -y llvm-17-dev libgmp-dev zstd + # NOTE: Docker setup for macOS removed since pipelines-collab tests are skipped on macOS # (GitHub macOS runners don't support nested virtualization needed for Docker) - name: Check Docker availability (macOS) @@ -331,6 +330,29 @@ jobs: run: cargo build --release working-directory: src-tauri + - name: Build native syqure binary + if: contains(matrix.scenario, 'syqure') + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + cd ${{ github.workspace }}/../syqure + # Override SSH submodule URLs to HTTPS (no SSH keys in CI) + git config --file .gitmodules submodule.codon.url https://github.com/madhavajay/codon.git + git config --file .gitmodules submodule.sequre.url https://github.com/madhavajay/sequre.git + git submodule sync + git submodule update --init --depth 1 codon sequre + # Build bundle and binary (matches syqure CI smoke test) + BUNDLE_ROOT="bin/linux-x86/codon" + 
BUNDLE_DIR="$(mktemp -d)" + BUNDLE_TAR="$BUNDLE_DIR/codon-linux-x86.tar.zst" + tar -C "$BUNDLE_ROOT" -ch . | zstd -19 -o "$BUNDLE_TAR" + export SYQURE_BUNDLE_FILE="$BUNDLE_TAR" + export SYQURE_BUNDLE_CACHE="$BUNDLE_DIR/cache" + export SYQURE_CPP_INCLUDE="$BUNDLE_ROOT/include" + export SYQURE_CPP_LIB_DIRS="$BUNDLE_ROOT/lib/codon" + export SYQURE_LLVM_INCLUDE="$(llvm-config-17 --includedir)" + cargo build -p syqure + - name: Install maturin (uv) if: runner.os != 'Windows' && matrix.scenario == 'jupyter-collab' run: | @@ -441,6 +463,48 @@ jobs: ./test-scenario.sh --jupyter-collab biovault-beaver/notebooks/02-advanced-features.json fi + - name: Run pipelines multiparty flow scenario + if: matrix.scenario == 'pipelines-multiparty-flow' + env: + SKIP_PLAYWRIGHT_INSTALL: "1" + AUTO_REBUILD_TAURI: "0" + run: | + if [ -n "${{ matrix.run_prefix }}" ]; then + ${{ matrix.run_prefix }} ./test-scenario.sh --pipelines-multiparty-flow + else + ./test-scenario.sh --pipelines-multiparty-flow + fi + + - name: Run syqure multiparty flow secure-only scenario + if: matrix.scenario == 'syqure-multiparty-flow-secure-only' + env: + SKIP_PLAYWRIGHT_INSTALL: "1" + AUTO_REBUILD_TAURI: "0" + BV_SKIP_SYQURE: "0" + SYQURE_SCENARIO_TIMEOUT: "20m" + run: | + # Hard cap runtime so a stuck syqure flow cannot burn CI minutes indefinitely. + if [ -n "${{ matrix.run_prefix }}" ]; then + timeout --preserve-status "$SYQURE_SCENARIO_TIMEOUT" ${{ matrix.run_prefix }} ./test-scenario.sh --syqure-multiparty-flow --syqure-secure-only + else + timeout --preserve-status "$SYQURE_SCENARIO_TIMEOUT" ./test-scenario.sh --syqure-multiparty-flow --syqure-secure-only + fi + + - name: Run syqure multiparty allele-freq scenario + if: matrix.scenario == 'syqure-multiparty-allele-freq' + env: + SKIP_PLAYWRIGHT_INSTALL: "1" + AUTO_REBUILD_TAURI: "0" + BV_SKIP_SYQURE: "0" + SYQURE_SCENARIO_TIMEOUT: "20m" + run: | + # Hard cap runtime so a stuck syqure flow cannot burn CI minutes indefinitely. 
+ if [ -n "${{ matrix.run_prefix }}" ]; then + timeout --preserve-status "$SYQURE_SCENARIO_TIMEOUT" ${{ matrix.run_prefix }} ./test-scenario.sh --syqure-multiparty-allele-freq + else + timeout --preserve-status "$SYQURE_SCENARIO_TIMEOUT" ./test-scenario.sh --syqure-multiparty-allele-freq + fi + - name: Upload test artifacts on failure if: failure() uses: actions/upload-artifact@v4 @@ -452,6 +516,7 @@ jobs: artifacts/ profiles-e2e: + if: false name: profiles-e2e (linux-x86_64) runs-on: namespace-profile-linux-medium env: @@ -619,6 +684,7 @@ jobs: artifacts/ profiles-e2e-macos: + if: false name: profiles-e2e (macos-arm64) runs-on: namespace-profile-mac-medium env: @@ -763,6 +829,7 @@ jobs: artifacts/ profiles-e2e-windows: + if: false name: profiles-e2e (windows-x86_64) runs-on: namespace-profile-windows-medium defaults: diff --git a/.gitignore b/.gitignore index dd9ce844..7cd4ec15 100644 --- a/.gitignore +++ b/.gitignore @@ -16,6 +16,9 @@ dist-ssr target/ src-tauri/resources/syftbox/syftbox src-tauri/resources/syftbox/syftbox.exe +src-tauri/resources/syqure/syqure +src-tauri/resources/syqure/syqure.exe +src-tauri/resources/syqure/lib/ src-tauri/resources/bundled/ # Editor directories and files diff --git a/biovault-app-dev.sh b/biovault-app-dev.sh new file mode 100755 index 00000000..cb16c071 --- /dev/null +++ b/biovault-app-dev.sh @@ -0,0 +1,37 @@ +#!/bin/bash +set -euo pipefail + +# Thin wrapper around biovault-app.sh that uses the local debug binary. +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +DEBUG_BIN="${APP_BIN:-$SCRIPT_DIR/src-tauri/target/debug/bv-desktop}" +FORCE_REBUILD="${BIOVAULT_DEV_REBUILD:-0}" + +ARGS=() +for arg in "$@"; do + case "$arg" in + --rebuild) + FORCE_REBUILD=1 + ;; + *) + ARGS+=("$arg") + ;; + esac +done + +if [[ "$FORCE_REBUILD" == "1" || ! -x "$DEBUG_BIN" ]]; then + if [[ "$FORCE_REBUILD" == "1" ]]; then + echo "Forcing debug rebuild (cargo build)..." 
+ else + echo "Debug binary not found at $DEBUG_BIN" + echo "Building debug binary (cargo build)..." + fi + (cd "$SCRIPT_DIR/src-tauri" && cargo build) +fi + +if [[ ! -x "$DEBUG_BIN" ]]; then + echo "Debug binary not found at $DEBUG_BIN" + echo "Debug binary still missing at $DEBUG_BIN" + exit 1 +fi + +APP_BIN="$DEBUG_BIN" exec "$SCRIPT_DIR/biovault-app.sh" "${ARGS[@]}" diff --git a/biovault-app.sh b/biovault-app.sh new file mode 100755 index 00000000..006850eb --- /dev/null +++ b/biovault-app.sh @@ -0,0 +1,215 @@ +#!/bin/bash +set -euo pipefail + +# Launch multiple BioVault.app instances with separate home dirs for multiparty testing. +# +# Usage: +# ./biovault-app.sh --emails user1@openmined.org,user2@openmined.org,user3@openmined.org ./test-run +# ./biovault-app.sh --emails a@x.com,b@x.com,c@x.com ~/BioVaultTest +# ./biovault-app.sh --stop # kill all instances +# +# Each email gets its own BIOVAULT_HOME under the given path. +# Each instance gets unique ports for SyftBox, WS bridge, and HTTP bridge. +# The window title includes the email so you can tell them apart. 
+ +APP_BIN="${APP_BIN:-/Applications/BioVault.app/Contents/MacOS/bv-desktop}" +APP_PIDS=() +SED_PIDS=() + +RED='\033[0;31m' +GREEN='\033[0;32m' +CYAN='\033[0;36m' +YELLOW='\033[1;33m' +NC='\033[0m' + +usage() { + cat < + $0 --stop + +Options: + --emails CSV Comma-separated list of emails (one BioVault instance per email) + --stop Kill all running bv-desktop instances launched by this script + -h, --help Show this help + +Environment: + APP_BIN Path to bv-desktop binary (default: /Applications/BioVault.app/Contents/MacOS/bv-desktop) + +Example: + $0 --emails alice@openmined.org,bob@openmined.org,carol@openmined.org ./multiparty-test +EOF +} + +cleanup() { + echo -e "\n${YELLOW}Shutting down all BioVault instances...${NC}" + # Kill the actual bv-desktop processes first + for pid in "${APP_PIDS[@]}"; do + kill "$pid" 2>/dev/null || true + done + sleep 2 + # Force kill any that didn't exit gracefully + for pid in "${APP_PIDS[@]}"; do + kill -9 "$pid" 2>/dev/null || true + done + # Clean up sed processes + for pid in "${SED_PIDS[@]}"; do + kill "$pid" 2>/dev/null || true + done + wait 2>/dev/null || true + echo -e "${GREEN}All instances stopped.${NC}" + exit 0 +} + +if [[ $# -eq 0 ]]; then + usage + exit 1 +fi + +EMAILS="" +BASE_PATH="" + +while [[ $# -gt 0 ]]; do + case "$1" in + --emails) + [[ -z "${2:-}" ]] && { echo "Error: --emails requires a CSV value"; exit 1; } + EMAILS="$2" + shift 2 + ;; + --stop) + mapfile -t pids < <(pgrep -f -- "$APP_BIN" || true) + if [[ ${#pids[@]} -eq 0 ]]; then + echo "No matching processes found for APP_BIN=$APP_BIN" + else + kill "${pids[@]}" 2>/dev/null || true + sleep 1 + mapfile -t remaining < <(pgrep -f -- "$APP_BIN" || true) + if [[ ${#remaining[@]} -gt 0 ]]; then + kill -9 "${remaining[@]}" 2>/dev/null || true + fi + echo "Stopped ${#pids[@]} process(es) for APP_BIN=$APP_BIN" + fi + exit 0 + ;; + -h|--help) + usage + exit 0 + ;; + -*) + echo "Unknown option: $1" + usage + exit 1 + ;; + *) + BASE_PATH="$1" + shift + ;; + esac 
+done + +if [[ -z "$EMAILS" ]]; then + echo "Error: --emails is required" + usage + exit 1 +fi + +if [[ -z "$BASE_PATH" ]]; then + echo "Error: path argument is required" + usage + exit 1 +fi + +if [[ ! -x "$APP_BIN" ]]; then + echo -e "${RED}bv-desktop not found at $APP_BIN${NC}" + echo "Set APP_BIN to a valid executable path." + exit 1 +fi + +IFS=',' read -ra EMAIL_LIST <<< "$EMAILS" + +if [[ ${#EMAIL_LIST[@]} -lt 1 ]]; then + echo "Error: at least one email is required" + exit 1 +fi + +BASE_PATH="$(cd "$(dirname "$BASE_PATH")" 2>/dev/null && pwd)/$(basename "$BASE_PATH")" +mkdir -p "$BASE_PATH" + +# Pick a random base port in the ephemeral range to avoid collisions with other services. +# Each instance uses 3 consecutive ports: syftbox, ws-bridge, http-bridge. +RAND_BASE=$((49152 + RANDOM % 10000)) +# Align to 10-port boundary per instance for readability +RAND_BASE=$((RAND_BASE / 10 * 10)) + +trap cleanup INT TERM EXIT + +echo -e "${CYAN}══════════════════════════════════════════════════${NC}" +echo -e "${CYAN} BioVault Multiparty Launcher (${#EMAIL_LIST[@]} instances)${NC}" +echo -e "${CYAN}══════════════════════════════════════════════════${NC}" +echo -e "${YELLOW}Binary:${NC} $APP_BIN" +echo -e "${YELLOW}Base path:${NC} $BASE_PATH" +echo "" + +for i in "${!EMAIL_LIST[@]}"; do + email="${EMAIL_LIST[$i]}" + home_dir="$BASE_PATH/$email" + env_home="$home_dir/.env-home" + xdg_config_home="$env_home/.config" + xdg_cache_home="$env_home/.cache" + xdg_data_home="$env_home/.local/share" + xdg_state_home="$env_home/.local/state" + tmp_dir="$env_home/tmp" + mkdir -p "$home_dir" + mkdir -p "$xdg_config_home" "$xdg_cache_home" "$xdg_data_home" "$xdg_state_home" "$tmp_dir" + + # Each instance gets 3 unique ports, spaced by 10 per instance + offset=$((i * 10)) + syftbox_port=$((RAND_BASE + offset)) + ws_bridge_port=$((RAND_BASE + offset + 1)) + http_bridge_port=$((RAND_BASE + offset + 2)) + + echo -e "${GREEN}[$((i+1))]${NC} $email" + echo -e " Home: $home_dir" + echo -e 
" Env HOME: $env_home" + echo -e " SyftBox: http://127.0.0.1:$syftbox_port" + echo -e " WS bridge: $ws_bridge_port" + echo -e " HTTP bridge: $http_bridge_port" + + # Launch bv-desktop directly (no pipeline) so we capture the real PID. + # Use process substitution for log prefixing instead of piping through sed. + env -i \ + HOME="$HOME" \ + USER="$USER" \ + PATH="$PATH" \ + SHELL="$SHELL" \ + TMPDIR="$tmp_dir" \ + XDG_CONFIG_HOME="$xdg_config_home" \ + XDG_CACHE_HOME="$xdg_cache_home" \ + XDG_DATA_HOME="$xdg_data_home" \ + XDG_STATE_HOME="$xdg_state_home" \ + DISPLAY="${DISPLAY:-}" \ + TERM="${TERM:-xterm-256color}" \ + LANG="${LANG:-en_US.UTF-8}" \ + BIOVAULT_HOME="$home_dir" \ + BIOVAULT_PROFILES_DIR="$home_dir/.bvprofiles" \ + BIOVAULT_DISABLE_PROFILES=1 \ + BIOVAULT_WINDOW_TITLE="BioVault — $email" \ + SYFTBOX_EMAIL="$email" \ + SYFTBOX_DATA_DIR="$home_dir" \ + SYFTBOX_CLIENT_URL="http://127.0.0.1:$syftbox_port" \ + DEV_WS_BRIDGE_PORT="$ws_bridge_port" \ + DEV_WS_BRIDGE_HTTP_PORT="$http_bridge_port" \ + "$APP_BIN" > >(sed "s/^/[$email] /") 2>&1 & + APP_PIDS+=($!) + + # Also track the sed PID for cleanup + SED_PIDS+=($(jobs -p | tail -1)) + + sleep 2 +done + +echo "" +echo -e "${CYAN}All ${#EMAIL_LIST[@]} instances launched. Press Ctrl+C to stop all.${NC}" +echo "" + +# Wait for all app processes; cleanup trap fires on Ctrl+C +wait "${APP_PIDS[@]}" 2>/dev/null || true diff --git a/build-signed.sh b/build-signed.sh index 9ba60019..bcc2065a 100755 --- a/build-signed.sh +++ b/build-signed.sh @@ -90,6 +90,58 @@ case "$(uname -s)" in echo "❌ syqure binary not found at syqure/target/debug/syqure" >&2 exit 1 fi + # Bundle codon/sequre libs alongside syqure so dyld finds them via @loader_path/lib/codon + if [[ -d syqure/target/dist/syqure/lib/codon ]]; then + echo "Copying codon/sequre libs to resources/syqure/lib/codon..." + rm -rf src-tauri/resources/syqure/lib/codon + mkdir -p src-tauri/resources/syqure/lib/codon + cp -RL syqure/target/dist/syqure/lib/codon/. 
src-tauri/resources/syqure/lib/codon/ + fi + # Fix dylib paths: bundle transitive homebrew deps and rewrite absolute paths + if [[ "$(uname -s)" == "Darwin" ]]; then + CODON_LIB="src-tauri/resources/syqure/lib/codon" + # Bundle libzstd alongside codon libs + ZSTD_LIB="" + if command -v brew >/dev/null 2>&1; then + ZSTD_PREFIX="$(brew --prefix zstd 2>/dev/null || true)" + if [[ -n "$ZSTD_PREFIX" && -f "$ZSTD_PREFIX/lib/libzstd.1.dylib" ]]; then + ZSTD_LIB="$ZSTD_PREFIX/lib/libzstd.1.dylib" + fi + fi + if [[ -z "$ZSTD_LIB" ]]; then + for candidate in /opt/homebrew/opt/zstd/lib/libzstd.1.dylib /usr/local/opt/zstd/lib/libzstd.1.dylib; do + if [[ -f "$candidate" ]]; then + ZSTD_LIB="$candidate" + break + fi + done + fi + if [[ -n "$ZSTD_LIB" ]]; then + echo "Bundling libzstd from $ZSTD_LIB" + cp -L "$ZSTD_LIB" "$CODON_LIB/libzstd.1.dylib" + chmod u+w "$CODON_LIB/libzstd.1.dylib" + else + echo "Warning: libzstd not found; codon dylibs may fail to load" >&2 + fi + # Rewrite absolute homebrew paths to @loader_path in all dylibs + echo "Rewriting dylib paths to @loader_path..." 
+ for dylib in "$CODON_LIB"/*.dylib; do + [[ -f "$dylib" ]] || continue + # Rewrite any /opt/homebrew/.../libzstd.1.dylib or /usr/local/.../libzstd.1.dylib + otool -L "$dylib" 2>/dev/null | awk '{print $1}' | { grep -E '/(opt|usr/local)/' || true; } | while read -r abs_path; do + lib_name="$(basename "$abs_path")" + echo " $(basename "$dylib"): $abs_path -> @loader_path/$lib_name" + install_name_tool -change "$abs_path" "@loader_path/$lib_name" "$dylib" + done + # Fix install name if it's an absolute homebrew path + current_id="$(otool -D "$dylib" 2>/dev/null | tail -1)" + if [[ "$current_id" == /opt/* || "$current_id" == /usr/local/opt/* ]]; then + lib_name="$(basename "$current_id")" + echo " Fixing install name: $(basename "$dylib") -> @loader_path/$lib_name" + install_name_tool -id "@loader_path/$lib_name" "$dylib" + fi + done + fi ;; esac diff --git a/dev-three.sh b/dev-three.sh new file mode 100755 index 00000000..4234a5fe --- /dev/null +++ b/dev-three.sh @@ -0,0 +1,777 @@ +#!/bin/bash +set -euo pipefail + +# ============================================================================= +# dev-three.sh - Launch THREE BioVault Desktop instances for multiparty testing +# ============================================================================= +# +# This script sets up 3 clients for testing multiparty flows: +# - client1@sandbox.local (contributor1) +# - client2@sandbox.local (contributor2) +# - aggregator@sandbox.local (aggregator) +# +# Quick start: +# ./dev-three.sh --reset # fresh stack + three desktops +# ./dev-three.sh --reset --single # fresh stack + single desktop (first client) +# ./dev-three.sh --stop # stop devstack and desktop pids +# +# ============================================================================= + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +WORKSPACE_ROOT="${WORKSPACE_ROOT:-$SCRIPT_DIR}" +BIOVAULT_DIR="${BIOVAULT_DIR:-$WORKSPACE_ROOT/biovault}" +SYFTBOX_DIR="${SYFTBOX_DIR:-$WORKSPACE_ROOT/syftbox}" +if [[ ! 
-d "$SYFTBOX_DIR" && -d "$BIOVAULT_DIR/syftbox" ]]; then + SYFTBOX_DIR="$BIOVAULT_DIR/syftbox" +fi +WORKSPACE_ROOT="$WORKSPACE_ROOT" "$SCRIPT_DIR/scripts/ensure-workspace-deps.sh" \ + "biovault/cli/Cargo.toml" \ + "syftbox-sdk/Cargo.toml" \ + "syftbox/rust/Cargo.toml" +if [[ ! -f "$BIOVAULT_DIR/tests/scripts/devstack.sh" ]]; then + echo "Missing devstack script at $BIOVAULT_DIR/tests/scripts/devstack.sh" >&2 + echo "Fix: run ./repo --init && ./repo sync from $WORKSPACE_ROOT" >&2 + exit 1 +fi +if [[ ! -d "$SYFTBOX_DIR" ]]; then + echo "Missing syftbox repo at $SYFTBOX_DIR" >&2 + echo "Fix: run ./repo --init && ./repo sync from $WORKSPACE_ROOT" >&2 + exit 1 +fi +DEVSTACK_SCRIPT="$BIOVAULT_DIR/tests/scripts/devstack.sh" +SANDBOX_ROOT="${SANDBOX_DIR:-$BIOVAULT_DIR/sandbox}" +WS_PORT_BASE="${DEV_WS_BRIDGE_PORT_BASE:-3333}" + +# Three clients for multiparty +DEFAULT_CLIENT1="${CLIENT1_EMAIL:-client1@sandbox.local}" +DEFAULT_CLIENT2="${CLIENT2_EMAIL:-client2@sandbox.local}" +DEFAULT_AGGREGATOR="${AGG_EMAIL:-aggregator@sandbox.local}" + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +BLUE='\033[0;34m' +YELLOW='\033[1;33m' +CYAN='\033[0;36m' +NC='\033[0m' + +log_info() { echo -e "${BLUE}ℹ️ $1${NC}"; } +log_success() { echo -e "${GREEN}✓ $1${NC}"; } +log_warn() { echo -e "${YELLOW}⚠️ $1${NC}"; } +log_error() { echo -e "${RED}✗ $1${NC}"; } +log_header() { echo -e "\n${CYAN}═══════════════════════════════════════════════════════════${NC}"; echo -e "${CYAN} $1${NC}"; echo -e "${CYAN}═══════════════════════════════════════════════════════════${NC}\n"; } + +usage() { + cat </dev/null 2>&1 || { log_error "python3 is required"; exit 1; } + command -v go >/dev/null 2>&1 || { log_error "Go is required to run the sbdev devstack"; exit 1; } + [[ -f "$DEVSTACK_SCRIPT" ]] || { log_error "Devstack helper not found at $DEVSTACK_SCRIPT"; exit 1; } + if ! 
command -v npm >/dev/null 2>&1; then + log_error "npm is required to run the desktop" + exit 1 + fi + + log_success "Requirements look good" +} + +ensure_bv_cli() { + local target="$BIOVAULT_DIR/cli/target/release/bv" + if [[ ! -x "$target" ]]; then + log_info "Building BioVault CLI (cargo build --release)..." + (cd "$BIOVAULT_DIR/cli" && cargo build --release >/dev/null 2>&1) + fi + BV_CLI_BIN="$target" +} + +sbdev_tool() { + (cd "$SYFTBOX_DIR" && GOCACHE="$SYFTBOX_DIR/.gocache" go run ./cmd/devstack "$@") +} + +find_state_file() { + local candidates=( + "$SANDBOX_ROOT/relay/state.json" + "$SANDBOX_ROOT/state.json" + ) + for path in "${candidates[@]}"; do + if [[ -f "$path" ]]; then + echo "$path" + return 0 + fi + done + return 1 +} + +load_state() { + STACK_STATE_FILE="$(find_state_file || true)" + if [[ -z "$STACK_STATE_FILE" ]]; then + log_error "Devstack state not found in $SANDBOX_ROOT (run with --reset to create)" + exit 1 + fi + + SERVER_PORT="$(python3 - "$STACK_STATE_FILE" <<'PY' +import json, sys +data = json.load(open(sys.argv[1])) +print(data["server"]["port"]) +PY +)" + SERVER_URL="http://127.0.0.1:${SERVER_PORT}" + + CLIENT_LINES=() + while IFS= read -r line; do + [[ -z "$line" ]] && continue + CLIENT_LINES+=("$line") + done < <(python3 - "$STACK_STATE_FILE" <<'PY' +import json, sys +data = json.load(open(sys.argv[1])) +for c in data.get("clients", []): + print("|".join([ + c.get("email", ""), + c.get("home_path", ""), + c.get("config", ""), + c.get("server_url", ""), + str(c.get("port", "")), + ])) +PY +) + + if [[ ${#CLIENT_LINES[@]} -eq 0 ]]; then + log_error "No clients found in devstack state" + exit 1 + fi +} + +client_field() { + local email="$1" + local field="$2" + local line home cfg srv port + for line in "${CLIENT_LINES[@]}"; do + IFS='|' read -r em home cfg srv port <<<"$line" + if [[ "$em" == "$email" ]]; then + case "$field" in + home) echo "$home" ;; + config) echo "$cfg" ;; + server) echo "$srv" ;; + port) echo "$port" ;; + *) 
return 1 ;; + esac + return 0 + fi + done + return 1 +} + +stop_desktops() { + log_header "Stopping desktop processes" + + pkill -f "jupyter.*$SANDBOX_ROOT" 2>/dev/null || true + + shopt -s nullglob + for pid_file in "$SANDBOX_ROOT"/*/desktop.pid; do + local pid + pid="$(tr -d ' \n\r' < "$pid_file")" + if [[ -n "$pid" ]] && ps -p "$pid" >/dev/null 2>&1; then + log_info "Stopping desktop pid $pid ($pid_file)" + kill "$pid" 2>/dev/null || true + fi + rm -f "$pid_file" + done + shopt -u nullglob + + pkill -f "tauri dev" 2>/dev/null || true + pkill -f "cargo-tauri" 2>/dev/null || true +} + +stop_stack() { + stop_desktops + log_info "Stopping SyftBox devstack..." + local args=(--sandbox "$SANDBOX_ROOT" --stop) + (( RESET_FLAG )) && args+=(--reset) + if ! bash "$DEVSTACK_SCRIPT" "${args[@]}"; then + log_warn "devstack stop reported an issue (continuing)" + fi + log_info "Pruning global sbdev state..." + sbdev_tool prune >/dev/null 2>&1 || log_warn "Global prune failed (continuing)" + log_success "Stopped" +} + +start_stack() { + local existing_state + existing_state="$(find_state_file || true)" + + if [[ -n "$existing_state" && $RESET_FLAG -eq 0 ]]; then + if ((${#CLIENTS[@]})); then + local missing + missing="$( + python3 - "$existing_state" "${CLIENTS[@]}" <<'PY' +import json, sys +state = json.load(open(sys.argv[1])) +want = sys.argv[2:] +have = {c.get("email") for c in state.get("clients", [])} +missing = [c for c in want if c not in have] +print(",".join(missing)) +PY + )" + if [[ -n "$missing" ]]; then + log_warn "Existing devstack missing requested clients: $missing. Rebuilding with --reset." 
+ RESET_FLAG=1 + else + log_info "Existing devstack state found at $existing_state (use --reset to rebuild)" + return + fi + else + log_info "Existing devstack state found at $existing_state (use --reset to rebuild)" + return + fi + fi + + log_header "Starting SyftBox devstack (3 clients)" + + log_info "Pruning any dead sbdev stacks (global)" + sbdev_tool prune >/dev/null 2>&1 || log_warn "Global prune failed (continuing)" + + local client_csv + client_csv="$(IFS=,; echo "${CLIENTS[*]}")" + + local args=(--sandbox "$SANDBOX_ROOT" --clients "$client_csv") + (( RESET_FLAG )) && args+=(--reset) + (( SKIP_SYNC_CHECK )) && args+=(--skip-sync-check) + if [[ "${BV_DEVSTACK_SKIP_KEYS:-0}" == "1" ]]; then + args+=(--skip-keys) + fi + + local -a env_prefix=(BIOVAULT_DISABLE_PROFILES=1) + if [[ -z "${BV_DEVSTACK_CLIENT_MODE:-}" ]]; then + # Multiparty/syqure flows need rust SyftBox daemons for hotlink/TCP proxy + # transport. dev-three.sh always enables hotlink, so default to rust. + # (matches CI matrix: client_mode=rust and test-scenario.sh line 354-355) + env_prefix+=(BV_DEVSTACK_CLIENT_MODE=rust) + fi + env "${env_prefix[@]}" bash "$DEVSTACK_SCRIPT" "${args[@]}" + + log_info "Active sbdev stacks:" + sbdev_tool list || log_warn "Could not list sbdev stacks" +} + +prepare_desktop_config() { + local email="$1" + local home config + home="$(client_field "$email" home)" || { log_error "No client home for $email"; exit 1; } + config="$(client_field "$email" config)" || { log_error "No config path for $email"; exit 1; } + + mkdir -p "$home" + log_info "Home directory ready for $email at $home" +} + +parse_data_dir() { + local config_path="$1" + python3 - "$config_path" <<'PY' +import json, sys +cfg = json.load(open(sys.argv[1])) +print(cfg.get("data_dir","")) +PY +} + +launch_desktop_instance() { + local email="$1" + local instance_num="$2" + local background="$3" + local ws_port=$((WS_PORT_BASE + instance_num - 1)) + + local home config server data_dir + home="$(client_field 
"$email" home)" || { log_error "No client home for $email"; exit 1; } + config="$(client_field "$email" config)" || { log_error "No config path for $email"; exit 1; } + server="$(client_field "$email" server)" + [[ -z "$server" ]] && server="$SERVER_URL" + data_dir="$(parse_data_dir "$config")" + [[ -z "$data_dir" ]] && data_dir="$home" + + prepare_desktop_config "$email" + + export BIOVAULT_HOME="$home" + export BIOVAULT_DEV_MODE=1 + export BIOVAULT_DEV_SYFTBOX=1 + export BIOVAULT_DISABLE_PROFILES=1 + export BV_SYFTBOX_BACKEND="${BV_SYFTBOX_BACKEND:-embedded}" + if [[ "$BV_SYFTBOX_BACKEND" != "process" ]]; then + unset SYFTBOX_BINARY SYFTBOX_VERSION + fi + export SYFTBOX_SERVER_URL="$server" + export SYFTBOX_CONFIG_PATH="$config" + export SYFTBOX_DATA_DIR="$data_dir" + export SYC_VAULT="$SYFTBOX_DATA_DIR/.syc" + export DEV_WS_BRIDGE=1 + export DEV_WS_BRIDGE_PORT="$ws_port" + + # Hotlink/TCP proxy vars already exported in main() before devstack starts. + # Syqure binary/codon discovery (per-instance, depends on WORKSPACE_ROOT) + if [[ -z "${SEQURE_NATIVE_BIN:-}" ]]; then + local syqure_dev="$WORKSPACE_ROOT/syqure/target/release/syqure" + if [[ -x "$syqure_dev" ]]; then + export SEQURE_NATIVE_BIN="$syqure_dev" + fi + fi + if [[ -z "${CODON_PATH:-}" ]]; then + local arch; arch="$(uname -m)" + local os_tag="macos" + [[ "$(uname -s)" == "Linux" ]] && os_tag="linux" + local codon_candidate="$WORKSPACE_ROOT/syqure/bin/${os_tag}-${arch}/codon" + if [[ -d "$codon_candidate" ]]; then + export CODON_PATH="$codon_candidate" + fi + fi + + local pkg_cmd="npm" + local role_label="" + case "$email" in + *client1*) role_label="contributor1" ;; + *client2*) role_label="contributor2" ;; + *aggregator*) role_label="aggregator" ;; + esac + + echo "" + echo -e "${CYAN}════════════════════════════════════════════════════════════${NC}" + echo -e "${CYAN} Desktop Instance ${instance_num}: ${email} (${role_label})${NC}" + echo -e 
"${CYAN}════════════════════════════════════════════════════════════${NC}" + echo -e "${YELLOW} BIOVAULT_HOME: $BIOVAULT_HOME${NC}" + echo -e "${YELLOW} SYFTBOX_DATA_DIR: $SYFTBOX_DATA_DIR${NC}" + echo -e "${YELLOW} SYFTBOX_CONFIG: $SYFTBOX_CONFIG_PATH${NC}" + echo -e "${YELLOW} SYC_VAULT: $SYC_VAULT${NC}" + echo -e "${YELLOW} SyftBox backend: $BV_SYFTBOX_BACKEND${NC}" + echo -e "${YELLOW} Server: $SYFTBOX_SERVER_URL${NC}" + echo -e "${YELLOW} WS Bridge Port: $DEV_WS_BRIDGE_PORT${NC}" + echo -e "${CYAN}════════════════════════════════════════════════════════════${NC}" + + if [[ "$background" == "bg" ]]; then + (cd "$SCRIPT_DIR" && $pkg_cmd run dev 2>&1 | sed "s/^/[${role_label}] /") & + local pid=$! + echo "$pid" > "$home/desktop.pid" + log_info "Desktop ${instance_num} (${role_label}) started in background (pid $pid)" + else + (cd "$SCRIPT_DIR" && $pkg_cmd run dev 2>&1 | sed "s/^/[${role_label}] /") + fi +} + +print_stack_summary() { + log_header "Devstack Summary (Multiparty)" + echo -e "${YELLOW}Sandbox:${NC} $SANDBOX_ROOT" + echo -e "${YELLOW}Server:${NC} $SERVER_URL" + local roles=("contributor1" "contributor2" "aggregator") + for idx in "${!CLIENTS[@]}"; do + local email="${CLIENTS[$idx]}" + local role="${roles[$idx]}" + local home config port + home="$(client_field "$email" home)" || { log_error "No client home for $email"; exit 1; } + config="$(client_field "$email" config)" || { log_error "No config path for $email"; exit 1; } + port="$(client_field "$email" port)" || { log_error "No daemon port for $email"; exit 1; } + echo -e " Client $((idx+1)) (${role}): $email" + echo -e " Home: $home" + echo -e " Config: $config" + echo -e " Daemon: http://127.0.0.1:${port}" + done +} + +seed_rpc_keepfiles() { + log_header "Seeding RPC keep files (.syftkeep)" + for email in "${CLIENTS[@]}"; do + local home config data_dir + home="$(client_field "$email" home)" || { log_error "No client home for $email"; exit 1; } + config="$(client_field "$email" config)" || { 
log_error "No config path for $email"; exit 1; } + data_dir="$(parse_data_dir "$config")" + [[ -z "$data_dir" ]] && { log_error "Could not read data_dir from $config"; exit 1; } + + for target in "${CLIENTS[@]}"; do + local rpc_dir="$data_dir/datasites/$target/app_data/biovault/rpc" + mkdir -p "$rpc_dir/message" 2>/dev/null || true + touch "$rpc_dir/.syftkeep" 2>/dev/null || true + touch "$rpc_dir/message/.syftkeep" 2>/dev/null || true + + local shadow_rpc="$data_dir/unencrypted/$target/app_data/biovault/rpc" + mkdir -p "$shadow_rpc/message" 2>/dev/null || true + touch "$shadow_rpc/.syftkeep" 2>/dev/null || true + touch "$shadow_rpc/message/.syftkeep" 2>/dev/null || true + done + done + log_info "RPC keep files seeded (best-effort)" +} + +provision_identities() { + log_header "Skipping Syft Crypto provisioning (use onboarding flow)" +} + +run_initial_sync() { + log_header "Skipping initial BioVault syncs (onboarding will handle setup)" +} + +preseed_onboarding_for_bootstrap() { + log_header "Pre-seeding onboarding state for bootstrap (skip onboarding UI)" + + for email in "${CLIENTS[@]}"; do + local home config server data_dir + home="$(client_field "$email" home)" || { log_error "No client home for $email"; exit 1; } + config="$(client_field "$email" config)" || { log_error "No config path for $email"; exit 1; } + server="$(client_field "$email" server)" + [[ -z "$server" ]] && server="$SERVER_URL" + data_dir="$(parse_data_dir "$config")" + [[ -z "$data_dir" ]] && data_dir="$home" + + mkdir -p "$home" + log_info "Pre-onboarding $email via bv init --quiet" + env \ + BIOVAULT_HOME="$home" \ + BIOVAULT_DISABLE_PROFILES=1 \ + SYFTBOX_SERVER_URL="$server" \ + SYFTBOX_CONFIG_PATH="$config" \ + SYFTBOX_DATA_DIR="$data_dir" \ + SYC_VAULT="$data_dir/.syc" \ + "$BV_CLI_BIN" init --quiet "$email" >/dev/null + done + + log_success "Pre-seeded onboarding for ${#CLIENTS[@]} clients" +} + +wait_for_peer_did() { + local data_dir="$1" + local peer="$2" + local timeout_s="${3:-20}" 
+ local did_path="$data_dir/datasites/$peer/public/crypto/did.json" + local deadline=$((SECONDS + timeout_s)) + while (( SECONDS < deadline )); do + if [[ -f "$did_path" ]]; then + return 0 + fi + sleep 1 + done + return 1 +} + +import_peer_contacts() { + log_header "Importing peer key bundles for all clients" + local imported=0 + local missing=0 + + for email in "${CLIENTS[@]}"; do + local config data_dir bundles_dir + config="$(client_field "$email" config)" || { log_error "No config path for $email"; exit 1; } + data_dir="$(parse_data_dir "$config")" + [[ -z "$data_dir" ]] && { log_error "Could not read data_dir from $config"; exit 1; } + bundles_dir="$data_dir/.biovault/vault/bundles" + mkdir -p "$bundles_dir" + + for peer in "${CLIENTS[@]}"; do + [[ "$peer" == "$email" ]] && continue + local did_path="$data_dir/datasites/$peer/public/crypto/did.json" + local bundle_path="$bundles_dir/$peer.json" + if ! wait_for_peer_did "$data_dir" "$peer" 30; then + log_warn "$email missing peer DID (not synced yet): $did_path" + ((missing += 1)) + continue + fi + cp "$did_path" "$bundle_path" + ((imported += 1)) + done + done + + if (( missing > 0 )); then + log_warn "Imported $imported bundles, $missing peer bundles missing (can refresh in app later)" + else + log_success "Imported $imported peer bundles" + fi +} + +run_bootstrap() { + local ws1 ws2 ws3 + ws1=$WS_PORT_BASE + ws2=$((WS_PORT_BASE + 1)) + ws3=$((WS_PORT_BASE + 2)) + local flow_name="${MULTIPARTY_FLOW_NAME:-multiparty}" + local project_file="${BOOTSTRAP_PROJECT_FILE:-${MULTIPARTY_BOOTSTRAP_PROJECT:-}}" + local project_name="${BOOTSTRAP_PROJECT_NAME:-${MULTIPARTY_BOOTSTRAP_PROJECT_NAME:-}}" + + log_header "Bootstrapping multiparty setup" + if [[ ! 
-f "$SCRIPT_DIR/scripts/bootstrap-three.mjs" ]]; then + log_error "Missing bootstrap helper: $SCRIPT_DIR/scripts/bootstrap-three.mjs" + exit 1 + fi + + local -a bootstrap_args=( + --ws1 "$ws1" --email1 "${CLIENTS[0]}" + --ws2 "$ws2" --email2 "${CLIENTS[1]}" + --ws3 "$ws3" --email3 "${CLIENTS[2]}" + --flow "$flow_name" + ) + if [[ -n "$project_file" ]]; then + bootstrap_args+=(--flow-file "$project_file") + fi + if [[ -n "$project_name" ]]; then + bootstrap_args+=(--flow-name "$project_name") + fi + if (( AUTO_RUN_FLAG )); then + bootstrap_args+=(--auto-run) + fi + if [[ -n "$STOP_BEFORE_STEP" ]]; then + bootstrap_args+=(--stop-before "$STOP_BEFORE_STEP") + fi + + node "$SCRIPT_DIR/scripts/bootstrap-three.mjs" "${bootstrap_args[@]}" + local mode_msg="Bootstrap complete (onboarding, trust, flow import, group invitation)" + if [[ -n "$project_file" ]]; then + mode_msg="Bootstrap complete (onboarding, trust, imported project flow + group invitation)" + fi + log_success "$mode_msg" +} + +launch_three_instances() { + if [[ ${#CLIENTS[@]} -lt 3 ]]; then + log_error "Three clients are required for multiparty mode" + exit 1 + fi + log_header "Launching THREE desktop windows for multiparty flow testing" + echo -e "${YELLOW} Roles:${NC}" + echo -e " client1@sandbox.local → contributor1" + echo -e " client2@sandbox.local → contributor2" + echo -e " aggregator@sandbox.local → aggregator" + echo "" + + if (( BOOTSTRAP_FLAG )); then + preseed_onboarding_for_bootstrap + launch_desktop_instance "${CLIENTS[2]}" 3 "bg" # aggregator + sleep 2 + launch_desktop_instance "${CLIENTS[1]}" 2 "bg" # client2 + sleep 2 + launch_desktop_instance "${CLIENTS[0]}" 1 "bg" # client1 + sleep 2 + run_bootstrap + cat </dev/null || true)" + if [[ -n "$stale_pids" ]]; then + log_warn "Killing stale workspace processes from previous runs: $stale_pids" + echo "$stale_pids" | xargs kill 2>/dev/null || true + sleep 1 + stale_pids="$(pgrep -f "$WORKSPACE_ROOT.*(bv-desktop|bv syftboxd|syqure|syftbox-rs)" 
2>/dev/null || true)" + if [[ -n "$stale_pids" ]]; then + echo "$stale_pids" | xargs kill -9 2>/dev/null || true + fi + fi +} + +cleanup_on_exit() { + log_info "Shutting down all desktop processes..." + # Kill all background jobs spawned by this script + jobs -p 2>/dev/null | xargs -r kill 2>/dev/null || true + stop_desktops + kill_stale_processes + log_success "Cleanup complete" +} + +main() { + check_requirements + kill_stale_processes + + if (( STOP_FLAG )); then + stop_stack + exit 0 + fi + + # Ensure all child processes are cleaned up on Ctrl+C / exit + trap cleanup_on_exit INT TERM EXIT + + # Hotlink/TCP proxy env must be exported BEFORE devstack starts so the + # rust SyftBox daemons inherit them and enable hotlink transport. + # (test-scenario.sh does this at lines 349-393, before start_devstack) + export BV_SYFTBOX_HOTLINK="${BV_SYFTBOX_HOTLINK:-1}" + export BV_SYFTBOX_HOTLINK_SOCKET_ONLY="${BV_SYFTBOX_HOTLINK_SOCKET_ONLY:-1}" + export BV_SYFTBOX_HOTLINK_TCP_PROXY="${BV_SYFTBOX_HOTLINK_TCP_PROXY:-1}" + export BV_SYFTBOX_HOTLINK_QUIC="${BV_SYFTBOX_HOTLINK_QUIC:-1}" + export BV_SYFTBOX_HOTLINK_QUIC_ONLY="${BV_SYFTBOX_HOTLINK_QUIC_ONLY:-0}" + export BV_SYQURE_TCP_PROXY="${BV_SYQURE_TCP_PROXY:-1}" + export SYFTBOX_HOTLINK="${SYFTBOX_HOTLINK:-$BV_SYFTBOX_HOTLINK}" + export SYFTBOX_HOTLINK_SOCKET_ONLY="${SYFTBOX_HOTLINK_SOCKET_ONLY:-$BV_SYFTBOX_HOTLINK_SOCKET_ONLY}" + export SYFTBOX_HOTLINK_TCP_PROXY="${SYFTBOX_HOTLINK_TCP_PROXY:-$BV_SYFTBOX_HOTLINK_TCP_PROXY}" + export SYFTBOX_HOTLINK_QUIC="${SYFTBOX_HOTLINK_QUIC:-$BV_SYFTBOX_HOTLINK_QUIC}" + export SYFTBOX_HOTLINK_QUIC_ONLY="${SYFTBOX_HOTLINK_QUIC_ONLY:-$BV_SYFTBOX_HOTLINK_QUIC_ONLY}" + + # Force Tauri Rust backend recompile so dev always uses fresh code. + # cargo tauri dev watches src-tauri/src but may miss workspace deps. + log_info "Rebuilding Tauri backend (cargo build)..." 
+ (cd "$SCRIPT_DIR/src-tauri" && cargo build 2>&1 | tail -1) + log_success "Tauri backend ready" + + start_stack + load_state + ensure_bv_cli + seed_rpc_keepfiles + provision_identities + run_initial_sync + import_peer_contacts + print_stack_summary + + if (( BOOTSTRAP_FLAG )) && (( SINGLE_MODE )); then + log_error "--bootstrap is only supported with three-client mode (no --single)" + exit 1 + fi + + if (( SINGLE_MODE )); then + launch_single_instance "$SINGLE_TARGET" + else + launch_three_instances + fi +} + +main diff --git a/plans/flows.md b/plans/flows.md index 46542a0e..b3f3910e 100644 --- a/plans/flows.md +++ b/plans/flows.md @@ -721,3 +721,172 @@ This prototype influenced the Flow spec's: - `../flow-spec-guide/tutorials/` - Progressive examples - `../flow-spec-guide/MIGRATION.md` - Detailed migration guide - `../biovault/pipelines.md` - Original design document (historical) + +--- + +## Multiparty Flow Implementation Progress (Feb 2026) + +### Problem Summary + +Multiparty flows broke after merging code from main. The issues stem from data structure mismatches between: + +- **FlowFileSpec** (YAML format): Contains `spec.datasites.groups` with role definitions +- **FlowSpec** (Rust struct): Flat `datasites: Vec` loses group information + +### Key Issues Fixed + +#### 1. Empty Steps Issue + +- **Cause**: `parse_flow_steps` looked for `flow_spec.spec.inputs.datasites.default` and `flow_spec.spec.datasites.groups` which don't exist in converted FlowSpec +- **Fix**: Build groups from participants instead of flow spec + +#### 2. Wrong Field Name + +- **Cause**: Code looked for `step.run.targets` but FlowSpec uses `step.runs_on` +- **Fix**: Check `runs_on` first, then fall back to `run.targets` + +#### 3. 
Resolved Emails Mismatch + +- **Cause**: `runs_on` contains default emails (e.g., "client1@sandbox.local") but actual participants have different emails +- **Fix**: Created default-to-actual email mapping by position + +### Code Changes in `src-tauri/src/commands/multiparty.rs` + +#### New Function: `build_group_map_from_participants` + +```rust +fn build_group_map_from_participants( + participants: &[FlowParticipant], + flow_spec: &serde_json::Value, +) -> (HashMap>, HashMap) { + let mut groups: HashMap> = HashMap::new(); + let mut default_to_actual: HashMap = HashMap::new(); + + // "all" group contains all participants + let all_emails: Vec = participants.iter().map(|p| p.email.clone()).collect(); + groups.insert("all".to_string(), all_emails.clone()); + + // Get default datasites from flow spec (for position mapping) + let default_datasites: Vec = flow_spec + .get("spec") + .and_then(|s| s.get("datasites")) + .and_then(|d| d.as_array()) + .map(|arr| arr.iter().filter_map(|v| v.as_str().map(String::from)).collect()) + .unwrap_or_default(); + + // Build groups based on roles + aggregate groups + let mut role_groups: HashMap> = HashMap::new(); + for (i, p) in participants.iter().enumerate() { + // Role-based group (e.g., "contributor1", "aggregator") + role_groups.entry(p.role.clone()).or_default().push(p.email.clone()); + + // Strip trailing digits for plural group (contributor1 -> contributors) + let base_role = p.role.trim_end_matches(|c: char| c.is_ascii_digit()); + if base_role != p.role { + let plural_role = format!("{}s", base_role); + role_groups.entry(plural_role).or_default().push(p.email.clone()); + } + + // Map default datasite email to actual participant email (by position) + if i < default_datasites.len() { + default_to_actual.insert(default_datasites[i].clone(), p.email.clone()); + } + } + + groups.extend(role_groups); + + // Also add "clients" as alias for "contributors" + if let Some(contributors) = groups.get("contributors").cloned() { + 
groups.insert("clients".to_string(), contributors); + } + + (groups, default_to_actual) +} +``` + +#### Updated `get_step_targets` + +```rust +fn get_step_targets(step: &serde_json::Value) -> Vec { + // Try converted FlowSpec structure first (runs_on) + if let Some(runs_on) = step.get("runs_on") { + match runs_on { + serde_json::Value::String(s) => return vec![s.clone()], + serde_json::Value::Array(arr) => { + return arr.iter().filter_map(|v| v.as_str().map(String::from)).collect(); + } + _ => {} + } + } + // Fallback to original YAML structure (run.targets) + if let Some(run) = step.get("run") { + if let Some(targets) = run.get("targets") { + // ... handle targets + } + } + // Barrier steps + if let Some(barrier) = step.get("barrier") { + // ... handle barrier + } + Vec::new() +} +``` + +#### Updated `my_action` Determination + +```rust +let my_action = if !targets.is_empty() { + targets.iter().any(|target| { + if target == my_email { return true; } + if let Some(group_members) = groups.get(target) { + if group_members.contains(&my_email.to_string()) { return true; } + } + // Check if target is a default datasite email that maps to my email + if let Some(actual_email) = default_to_actual.get(target) { + if actual_email == my_email { return true; } + } + false + }) +} else if is_barrier { true } else { false }; +``` + +### Reference Files + +#### `biovault/tests/scenarios/syqure-flow/flow.yaml` + +- Proper structure with `spec.datasites.groups` +- Groups like "aggregator" and "clients" with `include` arrays +- Steps use `run.targets: clients` or `run.targets: aggregator` + +#### `biovault/tests/scenarios/syqure-distributed.yaml` + +- Reference distributed test with parallel execution +- Uses `bv run` command for each participant +- Shows expected progress file structure + +### Goals + +1. **Unified Flow Syntax**: All flows should use same syntax as syqure-distributed +2. 
**SyftBox Sync**: Data should move via SyftBox (syft.pub.yaml/syft.sub.yaml) not shell scripts +3. **Single Code Path**: No separate code paths for single vs multiparty flows +4. **Robust Testing**: UI testing via websocket bridge + +### Test Command + +```bash +./test-scenario.sh --pipelines-multiparty --interactive +``` + +### Current Status + +- Code compiles without warnings +- Test infrastructure being set up (devstack with 3 clients) +- Need to verify flows execute correctly with group-based targeting +- Need to add syft.sub.yaml subscription when participants join + +### Next Steps + +1. Run tests until they pass +2. Verify data flows via SyftBox sync +3. Ensure flows use proper flow spec syntax +4. Add UI assertions via websocket bridge diff --git a/plans/multiparty.md b/plans/multiparty.md new file mode 100644 index 00000000..bfc0f017 --- /dev/null +++ b/plans/multiparty.md @@ -0,0 +1,475 @@ +# Multiparty Flow UX Plan (v2) + +## Overview + +Enable 3+ party collaborative flows with step-by-step manual/auto execution control. +Focus: UX for coordination, not computation complexity. + +## Key Design Decisions + +1. **Proposer assigns roles** - When sending invitation, proposer selects contacts for each role +2. **Execution on Runs tab** - Flow execution UI lives in Runs, not Messages +3. **Messages for invitations only** - Chat shows invitation card with "View in Runs" button +4. **Import vs Join** - Preview flow before joining, then join to participate + +## Components + +### 1. 
Simple Test Flow: `biovault/flows/multiparty` + +```yaml +apiVersion: syftbox.openmined.org/v1alpha1 +kind: Flow +metadata: + name: multiparty + version: 0.1.0 +spec: + multiparty: true + roles: + - id: contributor1 + description: First data contributor + - id: contributor2 + description: Second data contributor + - id: aggregator + description: Aggregates contributions + steps: + - id: generate + name: Generate Numbers + roles: [contributor1, contributor2] + shares_output: false + - id: share_contribution + name: Share Contribution + roles: [contributor1, contributor2] + shares_output: true + share_to: [aggregator] + depends_on: [generate] + - id: aggregate + name: Aggregate Sum + roles: [aggregator] + depends_on: [share_contribution] + wait_for_inputs: true + - id: share_result + name: Share Results + roles: [aggregator] + shares_output: true + share_to: [contributor1, contributor2] + depends_on: [aggregate] +``` + +### 2. Propose Flow UI (Messages) + +When user wants to propose a multiparty flow: + +1. Open "Propose Flow" modal from Messages or Flows +2. Select a multiparty flow +3. Assign contacts to roles +4. Send invitation + +``` +┌─────────────────────────────────────────────────┐ +│ Propose Multiparty Flow │ +├─────────────────────────────────────────────────┤ +│ Flow: multiparty ▼ │ +│ │ +│ Assign Participants to Roles: │ +│ ┌─────────────────┐ ┌──────────────────────┐ │ +│ │ contributor1 │→ │ client1@... ▼│ │ +│ │ contributor2 │→ │ client2@... ▼│ │ +│ │ aggregator │→ │ Me (proposer) │ │ +│ └─────────────────┘ └──────────────────────┘ │ +│ │ +│ Message: "Let's run this multiparty flow!" │ +│ │ +│ [Cancel] [Send Invitation] │ +└─────────────────────────────────────────────────┘ +``` + +### 3. 
Invitation Card (Messages) + +Recipients see in chat: + +``` +┌─────────────────────────────────────────────────┐ +│ 🔄 Flow Invitation: multiparty │ +├─────────────────────────────────────────────────┤ +│ From: aggregator@sandbox.local │ +│ Your role: contributor1 │ +│ │ +│ Participants: │ +│ 👤 client1@... → contributor1 (you) │ +│ 👤 client2@... → contributor2 │ +│ 👤 aggregator@... → aggregator │ +│ │ +│ [View Details] [Join in Runs →] │ +└─────────────────────────────────────────────────┘ +``` + +### 4. Multiparty Sessions (Runs Tab) + +New section at top of Runs tab: + +``` +┌─────────────────────────────────────────────────┐ +│ Active Multiparty Sessions │ +├─────────────────────────────────────────────────┤ +│ ┌───────────────────────────────────────────┐ │ +│ │ 🔄 multiparty │ │ +│ │ Your role: contributor1 │ │ +│ │ Status: 2/4 steps complete │ │ +│ │ [Open Session] │ │ +│ └───────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────┘ +``` + +### 5. Session Execution Panel (Runs Tab) + +When session is opened: + +``` +┌─────────────────────────────────────────────────┐ +│ 🔄 Multiparty: multiparty │ +│ Session: session-1234567890 │ +├─────────────────────────────────────────────────┤ +│ Participants: │ +│ ✅ client1@... (contributor1) - you │ +│ ✅ client2@... (contributor2) - joined │ +│ ✅ aggregator@... (aggregator) - joined │ +├─────────────────────────────────────────────────┤ +│ Your Steps: │ +│ ┌───────────────────────────────────────────┐ │ +│ │ Step 1: Generate Numbers │ │ +│ │ Status: ✅ Completed │ │ +│ ├───────────────────────────────────────────┤ │ +│ │ Step 2: Share Contribution │ │ +│ │ Status: Ready to share │ │ +│ │ [📁 View Files] [Share to aggregator →] │ │ +│ └───────────────────────────────────────────┘ │ +│ │ +│ Other Steps (view only): │ +│ • Aggregate Sum (aggregator) - Waiting │ +│ • Share Results (aggregator) - Pending │ +└─────────────────────────────────────────────────┘ +``` + +### 6. 
Step Execution States + +``` +pending → waiting for dependencies +ready → dependencies met, can run +running → currently executing +completed → finished, outputs ready +ready_to_share → completed, can share outputs +sharing → outputs being encrypted/sent +shared → outputs delivered +waiting_inputs → waiting for other parties +failed → error occurred +``` + +### 7. Backend Commands + +```rust +// Propose a flow (creates invitation message) +propose_multiparty_flow { + flow_name: String, + participants: Vec<{email, role}>, + message: String, +} + +// Accept invitation (joins session) +accept_flow_invitation { + session_id: String, + flow_name: String, + flow_spec: Value, + participants: Vec, +} + +// Get session state +get_multiparty_flow_state { session_id: String } + +// Run a step +run_flow_step { session_id: String, step_id: String } + +// Share step outputs +share_step_outputs { session_id: String, step_id: String } + +// List active sessions +list_multiparty_sessions {} +``` + +### 8. 
Implementation Steps + +Phase 1: Propose Flow UI + +- [ ] Add "Propose Flow" button in Messages (group chat only) +- [ ] Create ProposeFlowModal component +- [ ] Load multiparty flows, show role assignment UI +- [ ] Send invitation via send_message with flow_invitation metadata + +Phase 2: Invitation Card Updates + +- [ ] Update invitation card to show "Join in Runs" button +- [ ] Navigate to Runs tab on click, open session panel + +Phase 3: Runs Tab Integration + +- [ ] Add "Active Multiparty Sessions" section +- [ ] List sessions user is participating in +- [ ] Create session execution panel +- [ ] Wire up Run/Share buttons + +Phase 4: Polish + +- [ ] Real-time status updates between participants +- [ ] Error handling and retry +- [ ] Session completion state + +## Test Script + +```bash +./dev-three.sh --reset +# Opens 3 BioVault windows: +# client1@sandbox.local (contributor1) +# client2@sandbox.local (contributor2) +# aggregator@sandbox.local (aggregator) + +# Manual testing: +# 1. In aggregator window, go to Messages +# 2. Create group chat with client1, client2 +# 3. Click "Propose Flow" → select multiparty → assign roles +# 4. Send invitation +# 5. In client1/client2 windows, see invitation → click "Join in Runs" +# 6. 
Each participant steps through their assigned steps +``` + +# Multiparty Flow Improvements + +## Completed + +### UI Basics + +- [x] Button visibility - Run buttons only show for Ready steps +- [x] Aggregator doesn't see buttons until their turn +- [x] "Join Flow" → "View Flow" after joining +- [x] Hide "Decline" button once joined +- [x] Preview remains visible after share + +### Cross-Client Sync (Partial) + +- [x] Poll state files from MPC sharing folder (sandbox fallback) +- [x] Update aggregate step when contributors share (WaitingForInputs → Ready) +- [x] Aggregate reads contributor data (currently from sandbox direct paths) +- [x] Write progress.json when steps complete/share + +### Progress & UI (Session 2) + +- [x] Progress shows total flow progress: "X/4 steps complete" +- [x] Everyone sees same progress count (4 total steps) +- [x] Shows "Done" when all steps complete +- [x] syft.pub.yaml created when sharing (for SyftBox sync) +- [x] Participant chips show full email with checkbox status +- [x] All participants shown on each step (☑ completed, ☐ pending, greyed = not involved) +- [x] Preview button opens folder in OS file manager +- [x] Activity Log moved to tab (instead of collapsible section) +- [x] Added `roles` field to StepState for frontend + +--- + +## TODO - Remaining Items + +### 3. Participant Header - Show Current Stage + +- [ ] At top where participant cards are shown +- [ ] Show which step each participant is currently on +- [ ] e.g., "contributor1 - Step 2: Share Contribution" + +### 6. Aggregator Reads from Synced Paths (Not Direct) + +- [ ] Currently reading from sandbox direct paths (fallback hack) +- [ ] Should read from properly synced SyftBox paths +- [ ] e.g., `{my_home}/datasites/{contributor}/shared/flows/...` +- [ ] This requires SyftBox to be running and syncing + +### 7. 
Run Completion Status + +- [ ] When ALL steps complete for ALL participants → mark run as "Done" +- [ ] Currently shows "RUNNING" even when everything is done +- [ ] Update run status badge + +### 9. Clickable Participant Bubbles + +- [ ] Click participant card → see their current state/view +- [ ] Show their progress through steps +- [ ] "View" buttons should show files they shared with you (if available) + +--- + +## File Paths Reference + +**Where contributors write:** + +``` +{contributor_home}/datasites/{contributor}/shared/flows/{flow}/{session}/{step}/ +``` + +**Where aggregator should read (after SyftBox sync):** + +``` +{aggregator_home}/datasites/{contributor}/shared/flows/{flow}/{session}/{step}/ +``` + +**syft.pub.yaml structure:** + +```yaml +read: + - aggregator@sandbox.local +``` + +--- + +## Priority Order (Remaining) + +1. **Run completion status** - UX polish (show "Done" badge) +2. **Participant stage indicator** - UX enhancement +3. **Clickable participant cards** - UX enhancement +4. **Synced paths for aggregator** - When SyftBox is running + +--- + +## Implementation Progress (Feb 2026) + +### Problem: Empty Steps After Main Merge + +After merging code from main, multiparty flows broke with empty steps. Root cause: data structure mismatch between YAML and Rust. + +### Data Structure Issue + +| Format | Structure | Problem | +| ----------------------- | --------------------------------------------- | ------------------------- | +| **FlowFileSpec** (YAML) | `spec.datasites.groups` with role definitions | Full group info | +| **FlowSpec** (Rust) | `datasites: Vec` flat list | Groups lost in conversion | + +### Fixes Applied to `src-tauri/src/commands/multiparty.rs` + +#### 1. 
Build Groups from Participants (not flow spec) + +```rust +fn build_group_map_from_participants( + participants: &[FlowParticipant], + flow_spec: &serde_json::Value, +) -> (HashMap>, HashMap) +``` + +Creates: + +- `"all"` → all participant emails +- `"contributor1"`, `"contributor2"`, `"aggregator"` → role-based groups +- `"contributors"` → plural group from `contributorN` roles +- `"clients"` → alias for contributors +- `default_to_actual` map → position-based email mapping + +#### 2. Check `runs_on` First (FlowSpec format) + +```rust +fn get_step_targets(step: &serde_json::Value) -> Vec { + // Try FlowSpec format first + if let Some(runs_on) = step.get("runs_on") { ... } + // Fallback to YAML format + if let Some(run) = step.get("run") { ... } +} +``` + +#### 3. Default-to-Actual Email Mapping + +The flow spec has default emails like `client1@sandbox.local`, but actual participants may have different emails. Fixed by mapping by position: + +```rust +// Map default datasite email to actual participant email +if i < default_datasites.len() { + default_to_actual.insert(default_datasites[i].clone(), p.email.clone()); +} +``` + +#### 4. 
Updated `my_action` Logic + +```rust +let my_action = targets.iter().any(|target| { + if target == my_email { return true; } + if let Some(group_members) = groups.get(target) { + if group_members.contains(&my_email.to_string()) { return true; } + } + // Check default-to-actual mapping + if let Some(actual_email) = default_to_actual.get(target) { + if actual_email == my_email { return true; } + } + false +}) +``` + +### Test Command + +```bash +./test-scenario.sh --pipelines-multiparty --interactive +``` + +### Current Status + +- ✅ Code compiles without warnings +- ✅ Groups built from participants correctly +- ✅ `runs_on` field handled properly +- ✅ Default email → actual email mapping works +- 🔄 Testing multiparty execution flow +- ⏳ SyftBox sync (syft.pub.yaml/syft.sub.yaml) data transfer + +### Goal: Unified Flow Syntax + +All flows should use same syntax as `syqure-distributed`: + +- Data moves via SyftBox sync, not shell scripts +- No separate code paths for single vs multiparty +- Steps target groups like `clients`, `aggregator` + +--- + +## Checkpoint: Recent Commit Summary and Next Steps (Feb 7, 2026) + +### Recent commits on `madhava/multiparty` + +- `70445d6` (`multiparty`) + - Broad multiparty stack updates across backend/frontend/tests. + - Includes updates in: + - `src-tauri/src/commands/multiparty.rs` + - `src-tauri/src/commands/messages.rs` + - `src-tauri/src/commands/flows.rs` + - `src/runs.js`, `src/messages.js`, `src/css/runs.css` + - `tests/ui/messages-multiparty-flow.spec.ts` + - `tests/ui/syqure-multiparty-flow.spec.ts` (added) + - `test-scenario.sh` +- `0b5ed43` (`multiparty working`) + - Earlier stabilization pass for multiparty execution/sync/UI behavior. + +### Current verified status + +- `./test-scenario.sh --pipelines-multiparty-flow --interactive` passes after latest multiparty fixes. +- Final aggregate sharing is asserted to be secure and visible to participants in multiparty UI flow checks. 
+- `./test-scenario.sh tests/scenarios/syqure-distributed.yaml` is reported working (long runtime is expected, ~7+ minutes). + +### Current Syqure UI blocker + +- `./test-scenario.sh --syqure-multiparty-flow --interactive` currently fails early in flow-copy verification. +- Failure is in `tests/ui/syqure-multiparty-flow.spec.ts` where the test compares raw `flow.yaml` text to copied content. +- Received payload appears encrypted/enveloped in this path, so raw text equality is too strict for this scenario. + +### Uncommitted work in progress + +- `test-scenario.sh` + - Added `BV_SYQURE_AGG_MODE` switch (`smpc`/`he`) for `--syqure-multiparty-flow`, mirroring distributed-mode selection. +- `tests/ui/syqure-multiparty-flow.spec.ts` + - Added stronger convergence/sync/share assertions. + - Aligned `runId` with `sessionId` for consistent shared-path observation. + +### Next steps (execution order) + +1. Fix Syqure UI flow-copy assertion to validate semantic flow equivalence (or parse/decrypt envelope) instead of strict raw-text equality. +2. Re-run `./test-scenario.sh --syqure-multiparty-flow --interactive` and confirm it advances past invitation/import/join into full run. +3. Re-run `./test-scenario.sh tests/scenarios/syqure-distributed.yaml` as parity check on same flow artifacts and runtime mode. +4. Keep multiparty regression guard green by re-running `./test-scenario.sh --pipelines-multiparty-flow --interactive`. +5. If Syqure runtime stalls after assertion fix, separate UI/sync pass criteria from Syqure runtime pass criteria and gate with explicit known-issue marker until runtime patch is available. 
diff --git a/scripts/bootstrap-three.mjs b/scripts/bootstrap-three.mjs new file mode 100644 index 00000000..a545caaf --- /dev/null +++ b/scripts/bootstrap-three.mjs @@ -0,0 +1,528 @@ +#!/usr/bin/env node + +import WebSocket from 'ws' +import fs from 'node:fs' +import path from 'node:path' + +function parseArgs(argv) { + const args = new Map() + for (let i = 2; i < argv.length; i += 1) { + const token = argv[i] + if (!token.startsWith('--')) continue + const key = token.slice(2) + const next = argv[i + 1] + if (!next || next.startsWith('--')) { + args.set(key, '1') + } else { + args.set(key, next) + i += 1 + } + } + return args +} + +function sleep(ms) { + return new Promise((resolve) => setTimeout(resolve, ms)) +} + +async function importContactWithRetry(backend, fromLabel, identity, attempts = 40) { + for (let attempt = 1; attempt <= attempts; attempt += 1) { + try { + await backend.invoke('network_import_contact', { identity }) + return + } catch (err) { + if (attempt === attempts) throw err + if (attempt % 5 === 0) { + console.log( + `[bootstrap] ${fromLabel}: still waiting for ${identity} DID (${attempt}/${attempts})`, + ) + } + await backend.invoke('trigger_syftbox_sync').catch(() => {}) + await sleep(1000) + } + } +} + +async function connectBackend(port, label, timeoutMs = 120_000) { + const start = Date.now() + while (Date.now() - start < timeoutMs) { + try { + const socket = new WebSocket(`ws://localhost:${port}`) + await new Promise((resolve, reject) => { + const timeout = setTimeout(() => reject(new Error('connect timeout')), 5000) + socket.once('open', () => { + clearTimeout(timeout) + resolve() + }) + socket.once('error', (err) => { + clearTimeout(timeout) + reject(err) + }) + }) + let nextId = 0 + const pending = new Map() + socket.on('message', (data) => { + let parsed + try { + parsed = JSON.parse(data.toString()) + } catch { + return + } + const entry = pending.get(parsed?.id) + if (!entry) return + pending.delete(parsed.id) + if (parsed.error) 
entry.reject(new Error(parsed.error)) + else entry.resolve(parsed.result) + }) + + async function invoke(cmd, args = {}, cmdTimeoutMs = 30_000) { + const id = ++nextId + socket.send(JSON.stringify({ id, cmd, args })) + return await new Promise((resolve, reject) => { + pending.set(id, { resolve, reject }) + setTimeout(() => { + if (!pending.has(id)) return + pending.delete(id) + reject(new Error(`WS invoke timeout: ${cmd}`)) + }, cmdTimeoutMs) + }) + } + + async function close() { + if (socket.readyState !== WebSocket.OPEN) return + await new Promise((resolve) => { + socket.once('close', resolve) + socket.close() + }) + } + + console.log(`[bootstrap] connected ${label} on ws:${port}`) + return { invoke, close } + } catch { + await sleep(1000) + } + } + throw new Error(`failed to connect ${label} on ws:${port} within ${timeoutMs}ms`) +} + +function buildFlowSpec(flowName, client1, client2, aggregator) { + return { + apiVersion: 'syftbox.openmined.org/v1alpha1', + kind: 'Flow', + metadata: { + name: flowName, + version: '0.1.0', + }, + spec: { + vars: { + flow_path: 'syft://{datasite.current}/shared/flows/{flow_name}', + run_path: '{vars.flow_path}/{run_id}', + step_path: '{vars.run_path}/{step.number}-{step.id}', + }, + coordination: { + url: '{vars.run_path}/_progress', + share_with: 'all', + }, + datasites: { + all: [aggregator, client1, client2], + groups: { + aggregator: { include: [aggregator] }, + contributors: { include: [client1, client2] }, + }, + }, + roles: [ + { id: 'contributor1', description: 'First data contributor' }, + { id: 'contributor2', description: 'Second data contributor' }, + { id: 'aggregator', description: 'Aggregates contributions' }, + ], + steps: [ + { + id: 'generate', + name: 'Generate Numbers', + description: 'Generate random numbers locally', + run: { targets: 'contributors', strategy: 'parallel' }, + share: { + numbers_shared: { + source: 'self.outputs.numbers', + url: '{vars.step_path}/numbers.json', + permissions: { read: 
[aggregator] }, + }, + }, + }, + { + id: 'contributions_ready', + name: 'Wait for Contributions', + description: 'Wait for all contributors to share', + barrier: { + wait_for: 'generate', + targets: 'contributors', + timeout: 300, + }, + }, + { + id: 'aggregate', + name: 'Aggregate Sum', + description: 'Compute sum of all contributions', + run: { targets: 'aggregator' }, + depends_on: ['contributions_ready'], + share: { + result_shared: { + source: 'self.outputs.result', + url: '{vars.step_path}/result.json', + permissions: { read: [client1, client2, aggregator] }, + }, + }, + }, + ], + }, + } +} + +function inferFlowNameFromYamlFile(flowFilePath) { + const text = fs.readFileSync(flowFilePath, 'utf8') + const metadataBlock = text.match(/metadata:\s*([\s\S]*?)(?:\n[a-zA-Z_][a-zA-Z0-9_]*:|\n$)/m) + if (metadataBlock) { + const m = metadataBlock[1].match(/^\s*name:\s*["']?([A-Za-z0-9._-]+)["']?\s*$/m) + if (m?.[1]) return m[1] + } + const topLevel = text.match(/^\s*name:\s*["']?([A-Za-z0-9._-]+)["']?\s*$/m) + if (topLevel?.[1]) return topLevel[1] + return path.basename(flowFilePath).replace(/\.(ya?ml)$/i, '') +} + +async function importFlowProject(backends, flowFilePath) { + await Promise.all( + backends.map(async ({ backend, email }) => { + await backend.invoke( + 'import_flow', + { + flowFile: flowFilePath, + overwrite: true, + }, + 120_000, + ) + console.log(`[bootstrap] imported flow file on ${email}: ${flowFilePath}`) + }), + ) +} + +async function resolveInvitationFlowSpec(backend, flowName) { + const flows = await backend.invoke('get_flows', {}) + const flow = (flows || []).find((entry) => entry?.name === flowName) + if (!flow?.spec) { + throw new Error(`flow "${flowName}" not found after import`) + } + return flow.spec +} + +async function waitForStepStatus(backend, sessionId, stepId, expectedStatuses, label, timeoutMs = 300_000) { + const start = Date.now() + let lastStatus = '' + while (Date.now() - start < timeoutMs) { + try { + const state = await 
backend.invoke('get_multiparty_flow_state', { sessionId }, 120_000) + const step = (state?.steps || []).find((s) => s?.id === stepId) + const status = step?.status ? String(step.status) : '' + if (status) { + lastStatus = status + if (expectedStatuses.includes(status)) return + if (status === 'Failed') { + throw new Error(`${label}: step "${stepId}" entered Failed state`) + } + } + } catch (error) { + if (String(error).includes('Failed state')) throw error + } + await backend.invoke('trigger_syftbox_sync').catch(() => {}) + await sleep(1500) + } + throw new Error(`${label}: timed out waiting for "${stepId}" [${expectedStatuses}] (last=${lastStatus})`) +} + +async function runStepAndWait(backend, sessionId, stepId, label, timeoutMs = 300_000) { + const start = Date.now() + const rpcTimeout = 120_000 + while (Date.now() - start < timeoutMs) { + try { + await backend.invoke('run_flow_step', { sessionId, stepId }, rpcTimeout) + console.log(`[auto-run] ${label}: started ${stepId}`) + break + } catch (error) { + const msg = String(error) + if (/step is not ready to run \(status:\s*(completed|shared|running)\)/i.test(msg)) { + console.log(`[auto-run] ${label}: ${stepId} already running/done`) + break + } + if ( + /dependency.*not satisfied/i.test(msg) || + /not ready to run.*waiting/i.test(msg) || + /WS invoke timeout/i.test(msg) + ) { + await backend.invoke('trigger_syftbox_sync').catch(() => {}) + await sleep(1500) + continue + } + throw error + } + } + await waitForStepStatus(backend, sessionId, stepId, ['Completed', 'Shared'], label, timeoutMs) + console.log(`[auto-run] ${label}: ${stepId} completed`) +} + +async function shareStepAndWait(backend, sessionId, stepId, label, timeoutMs = 300_000) { + const rpcTimeout = 120_000 + try { + await backend.invoke('share_step_outputs', { sessionId, stepId }, rpcTimeout) + console.log(`[auto-run] ${label}: shared ${stepId}`) + } catch (error) { + if (!/WS invoke timeout/i.test(String(error))) throw error + 
console.log(`[auto-run] ${label}: share timeout (transient), waiting for status...`) + } + await waitForStepStatus(backend, sessionId, stepId, ['Shared'], label, timeoutMs) + console.log(`[auto-run] ${label}: ${stepId} shared`) +} + +async function autoRunFlowSteps(backends, sessionId, flowSpec, participants, stopBefore) { + const { b1, b2, b3, e1, e2, e3 } = backends + + console.log('[auto-run] accepting invitation on all backends...') + const threadId = null + await Promise.all([ + b1.invoke('accept_flow_invitation', { + sessionId, flowName: flowSpec?.metadata?.name || 'flow', + flowSpec, participants, autoRunAll: false, threadId, + }, 120_000), + b2.invoke('accept_flow_invitation', { + sessionId, flowName: flowSpec?.metadata?.name || 'flow', + flowSpec, participants, autoRunAll: false, threadId, + }, 120_000), + b3.invoke('accept_flow_invitation', { + sessionId, flowName: flowSpec?.metadata?.name || 'flow', + flowSpec, participants, autoRunAll: false, threadId, + }, 120_000), + ]) + console.log('[auto-run] all backends accepted') + + await sleep(2000) + await b1.invoke('trigger_syftbox_sync').catch(() => {}) + await b2.invoke('trigger_syftbox_sync').catch(() => {}) + await b3.invoke('trigger_syftbox_sync').catch(() => {}) + + // Extract step ids from flow spec + const steps = flowSpec?.spec?.steps || flowSpec?.steps || [] + const stepIds = steps.map((s) => s.id).filter(Boolean) + console.log(`[auto-run] flow steps: ${stepIds.join(', ')}`) + if (stopBefore) { + console.log(`[auto-run] will stop before: ${stopBefore}`) + } + + for (const stepDef of steps) { + const sid = stepDef.id + if (!sid) continue + if (stopBefore && sid === stopBefore) { + console.log(`[auto-run] stopping before step: ${sid}`) + break + } + + // Skip barrier steps - they auto-resolve + if (stepDef.barrier) { + console.log(`[auto-run] skipping barrier: ${sid}`) + continue + } + + const targets = stepDef.run?.targets || '' + const isClients = targets === 'clients' || targets === 'contributors' 
+ const isAggregator = targets === 'aggregator' + const isAll = targets === 'all' + + if (isClients) { + console.log(`[auto-run] running ${sid} on clients...`) + await Promise.all([ + runStepAndWait(b1, sessionId, sid, e1), + runStepAndWait(b2, sessionId, sid, e2), + ]) + if (stepDef.share) { + console.log(`[auto-run] sharing ${sid} from clients...`) + await Promise.all([ + shareStepAndWait(b1, sessionId, sid, e1), + shareStepAndWait(b2, sessionId, sid, e2), + ]) + await b3.invoke('trigger_syftbox_sync').catch(() => {}) + await sleep(2000) + } + } else if (isAggregator) { + console.log(`[auto-run] running ${sid} on aggregator...`) + await runStepAndWait(b3, sessionId, sid, e3) + if (stepDef.share) { + console.log(`[auto-run] sharing ${sid} from aggregator...`) + await shareStepAndWait(b3, sessionId, sid, e3) + await b1.invoke('trigger_syftbox_sync').catch(() => {}) + await b2.invoke('trigger_syftbox_sync').catch(() => {}) + await sleep(2000) + } + } else if (isAll) { + console.log(`[auto-run] running ${sid} on all...`) + await Promise.all([ + runStepAndWait(b1, sessionId, sid, e1), + runStepAndWait(b2, sessionId, sid, e2), + runStepAndWait(b3, sessionId, sid, e3), + ]) + if (stepDef.share) { + console.log(`[auto-run] sharing ${sid} from all...`) + await Promise.all([ + shareStepAndWait(b1, sessionId, sid, e1), + shareStepAndWait(b2, sessionId, sid, e2), + shareStepAndWait(b3, sessionId, sid, e3), + ]) + } + } + } + + console.log('[auto-run] done') +} + +async function ensureOnboarded(backend, email, label) { + const onboarded = await backend.invoke('check_is_onboarded') + if (onboarded) { + console.log(`[bootstrap] ${label}: already onboarded`) + return + } + console.log(`[bootstrap] ${label}: completing onboarding...`) + await backend.invoke('complete_onboarding', { email }, 180_000) + for (let attempt = 0; attempt < 30; attempt += 1) { + const ok = await backend.invoke('check_is_onboarded').catch(() => false) + if (ok) { + console.log(`[bootstrap] ${label}: 
onboarding complete`) + return + } + await sleep(1000) + } + throw new Error(`${label}: onboarding did not complete`) +} + +async function main() { + const args = parseArgs(process.argv) + const ws1 = Number(args.get('ws1') || '3333') + const ws2 = Number(args.get('ws2') || '3334') + const ws3 = Number(args.get('ws3') || '3335') + const email1 = args.get('email1') || 'client1@sandbox.local' + const email2 = args.get('email2') || 'client2@sandbox.local' + const email3 = args.get('email3') || 'aggregator@sandbox.local' + const requestedFlowName = args.get('flow') || 'multiparty' + const flowFileArg = args.get('flow-file') || '' + const explicitFlowName = args.get('flow-name') || '' + const autoRun = args.has('auto-run') + const stopBefore = args.get('stop-before') || '' + const flowFilePath = flowFileArg ? path.resolve(process.cwd(), flowFileArg) : '' + if (flowFilePath && !fs.existsSync(flowFilePath)) { + throw new Error(`flow file not found: ${flowFilePath}`) + } + + const backend1 = await connectBackend(ws1, email1) + const backend2 = await connectBackend(ws2, email2) + const backend3 = await connectBackend(ws3, email3) + try { + await ensureOnboarded(backend1, email1, email1) + await ensureOnboarded(backend2, email2, email2) + await ensureOnboarded(backend3, email3, email3) + + await backend1.invoke('trigger_syftbox_sync').catch(() => {}) + await backend2.invoke('trigger_syftbox_sync').catch(() => {}) + await backend3.invoke('trigger_syftbox_sync').catch(() => {}) + await sleep(1500) + + console.log('[bootstrap] importing contacts...') + await importContactWithRetry(backend1, email1, email2) + await importContactWithRetry(backend1, email1, email3) + await importContactWithRetry(backend2, email2, email1) + await importContactWithRetry(backend2, email2, email3) + await importContactWithRetry(backend3, email3, email1) + await importContactWithRetry(backend3, email3, email2) + + let flowName = requestedFlowName + let flowSpec = null + if (flowFilePath) { + flowName = 
explicitFlowName || inferFlowNameFromYamlFile(flowFilePath) + console.log(`[bootstrap] importing project flow "${flowName}" from ${flowFilePath}`) + await importFlowProject( + [ + { backend: backend1, email: email1 }, + { backend: backend2, email: email2 }, + { backend: backend3, email: email3 }, + ], + flowFilePath, + ) + flowSpec = await resolveInvitationFlowSpec(backend3, flowName) + } else { + flowSpec = buildFlowSpec(flowName, email1, email2, email3) + console.log(`[bootstrap] importing flow "${flowName}" on aggregator...`) + await backend3.invoke( + 'import_flow_from_json', + { + request: { + name: flowName, + flow_json: flowSpec, + overwrite: true, + }, + }, + 60_000, + ) + } + + const sessionId = `session-${Date.now()}` + // Order must match flow.yaml inputs.datasites.default: [aggregator, client1, client2] + // so that {datasites[0]} = aggregator, {datasites[1]} = client1, etc. + const participants = [ + { email: email3, role: 'aggregator' }, + { email: email1, role: 'contributor1' }, + { email: email2, role: 'contributor2' }, + ] + console.log('[bootstrap] creating group thread + flow invitation...') + const invitation = await backend3.invoke( + 'send_message', + { + request: { + recipients: [email1, email2], + body: `Join me in a multiparty flow! 
Flow: ${flowName} - ${Date.now()}`, + subject: `Multiparty Flow: ${flowName}`, + metadata: { + flow_invitation: { + flow_name: flowName, + session_id: sessionId, + participants, + flow_spec: flowSpec, + }, + }, + }, + }, + 60_000, + ) + + await backend1.invoke('sync_messages_with_failures').catch(() => {}) + await backend2.invoke('sync_messages_with_failures').catch(() => {}) + await backend3.invoke('sync_messages_with_failures').catch(() => {}) + + console.log( + `[bootstrap] done: thread=${invitation?.thread_id || 'unknown'} session=${sessionId}`, + ) + + if (autoRun) { + await autoRunFlowSteps( + { b1: backend1, b2: backend2, b3: backend3, e1: email1, e2: email2, e3: email3 }, + sessionId, + flowSpec, + participants, + stopBefore || null, + ) + } + } finally { + await backend1.close().catch(() => {}) + await backend2.close().catch(() => {}) + await backend3.close().catch(() => {}) + } +} + +main().catch((err) => { + console.error(`[bootstrap] failed: ${err?.message || String(err)}`) + process.exit(1) +}) diff --git a/scripts/fetch-bundled-deps.sh b/scripts/fetch-bundled-deps.sh index 290581db..dd38cc4c 100755 --- a/scripts/fetch-bundled-deps.sh +++ b/scripts/fetch-bundled-deps.sh @@ -14,6 +14,7 @@ fi ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" OUT_ROOT="${OUT_ROOT:-"$ROOT_DIR/src-tauri/resources/bundled"}" +SYQURE_OUT_DIR="${SYQURE_OUT_DIR:-"$ROOT_DIR/src-tauri/resources/syqure"}" CONFIG_FILE="${BUNDLED_CONFIG:-"$ROOT_DIR/scripts/bundled-deps.json"}" # Parse command line arguments @@ -373,6 +374,50 @@ fetch_uv() { rm -rf "$tmpdir" } +fetch_syqure() { + local os="$1" + if [[ "$os" == "windows" ]]; then + echo "⚠️ Skipping syqure fetch for Windows" + return + fi + + mkdir -p "$SYQURE_OUT_DIR" + rm -f "$SYQURE_OUT_DIR/syqure" + + local local_bin="${SYQURE_LOCAL_BIN:-}" + if [[ -z "$local_bin" ]]; then + local candidates=( + "$ROOT_DIR/syqure/target/release/syqure" + "$ROOT_DIR/../syqure/target/release/syqure" + "$ROOT_DIR/syqure/target/debug/syqure" + "$ROOT_DIR/../syqure/target/debug/syqure" + ) + for candidate in "${candidates[@]}"; do + if [[ -x "$candidate" ]]; then + local_bin="$candidate" + break + fi + done + fi + + if [[ -n "$local_bin" && -x "$local_bin" ]]; then + cp "$local_bin" "$SYQURE_OUT_DIR/syqure" + chmod +x "$SYQURE_OUT_DIR/syqure" + echo "✅ syqure bundled from local binary: $local_bin" + return + fi + + if [[ -n "${SYQURE_DOWNLOAD_URL:-}" ]]; then + echo "⬇️ Fetching syqure from SYQURE_DOWNLOAD_URL" + download_with_retry "$SYQURE_DOWNLOAD_URL" "$SYQURE_OUT_DIR/syqure" 3 3 + chmod +x "$SYQURE_OUT_DIR/syqure" + echo "✅ syqure bundled from download URL" + return + fi + + echo "⚠️ syqure binary not bundled (no local build and SYQURE_DOWNLOAD_URL unset)" +} + main() { local os arch read -r os arch <<<"$(detect_platform)" @@ -386,7 +431,7 @@ main() { # Clean existing bundled dependencies if requested if [[ "$CLEAN_FIRST" == true ]]; then echo "🧹 Cleaning existing bundled dependencies..." 
- rm -rf "$OUT_ROOT/java" "$OUT_ROOT/nextflow" "$OUT_ROOT/uv" + rm -rf "$OUT_ROOT/java" "$OUT_ROOT/nextflow" "$OUT_ROOT/uv" "$SYQURE_OUT_DIR" echo "✅ Cleaned bundled directories" fi @@ -397,6 +442,7 @@ main() { extract_java "$os" "$arch" fetch_nextflow "$os" "$arch" fetch_uv "$os" "$arch" + fetch_syqure "$os" echo "🔧 Fixing file permissions and removing quarantine attributes..." diff --git a/scripts/setup-workspace.sh b/scripts/setup-workspace.sh index 598c9f6c..721a49e9 100755 --- a/scripts/setup-workspace.sh +++ b/scripts/setup-workspace.sh @@ -74,12 +74,19 @@ clone_if_missing_async() { local name="$1" local url="$2" local branch="${3:-}" + local optional="${4:-0}" if [[ -d "$PARENT_DIR/$name" ]]; then echo "$name already exists at $PARENT_DIR/$name" return 0 fi ( - clone_if_missing "$name" "$url" "$branch" + if ! clone_if_missing "$name" "$url" "$branch"; then + if [[ "$optional" == "1" ]]; then + echo "⚠️ Optional dependency '$name' clone failed; continuing without it." + exit 0 + fi + exit 1 + fi ) & CLONE_PIDS+=($!) } @@ -108,6 +115,12 @@ clone_if_missing_async "syftbox" "https://github.com/OpenMined/syftbox.git" "mad clone_if_missing_async "biovault-beaver" "https://github.com/OpenMined/biovault-beaver.git" clone_if_missing_async "sbenv" "https://github.com/OpenMined/sbenv.git" clone_if_missing_async "bioscript" "https://github.com/OpenMined/bioscript.git" +if [[ "${BV_SKIP_SYQURE:-0}" == "1" || "${SKIP_SYQURE_CLONE:-0}" == "1" ]]; then + echo "Skipping syqure clone (BV_SKIP_SYQURE=${BV_SKIP_SYQURE:-0}, SKIP_SYQURE_CLONE=${SKIP_SYQURE_CLONE:-0})" +else + # syqure may be private/inaccessible in some CI contexts; treat as optional here. 
+ clone_if_missing_async "syqure" "https://github.com/madhavajay/syqure.git" "" "1" +fi wait_for_clones # Setup nested dependencies for syftbox-sdk @@ -143,6 +156,9 @@ create_symlink "syftbox" create_symlink "biovault-beaver" create_symlink "sbenv" create_symlink "bioscript" +if [[ -d "$PARENT_DIR/syqure" ]]; then + create_symlink "syqure" +fi echo "" echo "Workspace setup complete!" diff --git a/scripts/sign-bundled-deps.sh b/scripts/sign-bundled-deps.sh index af2166c4..cbc4fd77 100755 --- a/scripts/sign-bundled-deps.sh +++ b/scripts/sign-bundled-deps.sh @@ -74,6 +74,15 @@ if [[ -f "$SYQURE_DIR/syqure" ]]; then fi echo "" +# Sign codon/sequre libs bundled with syqure +echo "Signing syqure codon/sequre libs..." +if [[ -d "$SYQURE_DIR/lib/codon" ]]; then + find "$SYQURE_DIR/lib/codon" -type f \( -name "*.dylib" -o -name "*.so" -o -perm +111 \) | while read -r bin; do + sign_binary "$bin" || true + done +fi +echo "" + # Sign uv echo "Signing uv binaries..." if [[ -d "$BUNDLED_DIR/uv" ]]; then diff --git a/scripts/watch_syqure_multiparty.py b/scripts/watch_syqure_multiparty.py new file mode 100644 index 00000000..ef1e2c1f --- /dev/null +++ b/scripts/watch_syqure_multiparty.py @@ -0,0 +1,323 @@ +#!/usr/bin/env python3 +"""Live watcher for Syqure multiparty UI runs. + +Prints per-peer step status, proxy listener readiness, and hotlink rx/tx deltas. 
+""" + +from __future__ import annotations + +import argparse +import json +import socket +import subprocess +import sys +import time +from pathlib import Path +from typing import Dict, List, Optional, Tuple + +STEP_ORDER = ["gen_variants", "build_master", "align_counts", "secure_aggregate"] + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Watch Syqure multiparty progress.") + parser.add_argument( + "--sandbox", + default="biovault/sandbox", + help="Sandbox root directory (default: biovault/sandbox).", + ) + parser.add_argument( + "--flow", + default="syqure-flow", + help="Flow name under shared/flows (default: syqure-flow).", + ) + parser.add_argument( + "--participants", + default="", + help="Comma-separated participant emails (default: auto-discover *@sandbox.local).", + ) + parser.add_argument( + "--interval", + type=float, + default=2.0, + help="Refresh interval seconds (default: 2.0).", + ) + parser.add_argument( + "--once", + action="store_true", + help="Print one snapshot and exit.", + ) + parser.add_argument( + "--prefix", + default="[watch]", + help="Prefix for output lines.", + ) + return parser.parse_args() + + +def role_for_email(email: str) -> str: + return email.split("@", 1)[0] + + +def discover_participants(sandbox: Path) -> List[str]: + emails: List[str] = [] + if not sandbox.exists(): + return emails + for child in sandbox.iterdir(): + if not child.is_dir(): + continue + name = child.name + if "@" not in name: + continue + if (child / "datasites" / name).exists(): + emails.append(name) + return sorted(emails) + + +def read_json(path: Path) -> Optional[dict]: + try: + return json.loads(path.read_text()) + except Exception: + return None + + +def latest_session(sandbox: Path, flow: str, emails: List[str]) -> Optional[str]: + candidates: List[Tuple[float, str]] = [] + for email in emails: + flow_root = sandbox / email / "datasites" / email / "shared" / "flows" / flow + if not flow_root.exists(): + continue 
+ for session_dir in flow_root.glob("session-*"): + if not session_dir.is_dir(): + continue + progress_log = session_dir / "_progress" / "log.jsonl" + ts = progress_log.stat().st_mtime if progress_log.exists() else session_dir.stat().st_mtime + candidates.append((ts, session_dir.name)) + if not candidates: + return None + candidates.sort(key=lambda item: item[0], reverse=True) + return candidates[0][1] + + +def parse_step_statuses(progress_log: Path) -> Dict[str, str]: + statuses: Dict[str, str] = {} + if not progress_log.exists(): + return statuses + try: + lines = progress_log.read_text().splitlines() + except Exception: + return statuses + for line in lines[-500:]: + try: + entry = json.loads(line) + except Exception: + continue + step_id = entry.get("step_id") + event = (entry.get("event") or "").strip() + if not step_id: + continue + if event == "step_started": + statuses[step_id] = "Running" + elif event == "step_completed": + statuses[step_id] = "Completed" + elif event == "step_shared": + statuses[step_id] = "Shared" + elif event == "step_failed": + statuses[step_id] = "Failed" + elif event == "syqure_proxy_ready": + statuses[f"{step_id}.proxy"] = "Ready" + return statuses + + +def read_private_step_line(path: Path) -> str: + if not path.exists(): + return "-" + try: + lines = path.read_text().splitlines() + except Exception: + return "-" + if not lines: + return "-" + return lines[-1] + + +def is_listening(port: int) -> bool: + try: + proc = subprocess.run( + ["lsof", "-nP", f"-iTCP:{int(port)}", "-sTCP:LISTEN"], + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + text=True, + check=False, + ) + lines = [line for line in (proc.stdout or "").splitlines() if line.strip()] + if proc.returncode == 0 and len(lines) >= 2: + return True + except FileNotFoundError: + pass + + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(0.08) + try: + return sock.connect_ex(("127.0.0.1", int(port))) == 0 + except Exception: + return False + 
finally: + sock.close() + + +def parse_mpc_channels(session_root: Path, email: str) -> str: + mpc_root = session_root / "_mpc" + if not mpc_root.exists(): + return "-" + parts: List[str] = [] + for channel_dir in sorted(mpc_root.iterdir()): + if not channel_dir.is_dir() or "_to_" not in channel_dir.name: + continue + stream_tcp = read_json(channel_dir / "stream.tcp") or {} + ports = stream_tcp.get("ports") if isinstance(stream_tcp.get("ports"), dict) else {} + local_port = ports.get(email) if isinstance(ports, dict) else None + if local_port is None: + local_port = stream_tcp.get("port") + try: + local_port = int(local_port) if local_port is not None else None + except Exception: + local_port = None + accept_flag = (channel_dir / "stream.accept").read_text().strip() == "1" if (channel_dir / "stream.accept").exists() else False + listening_flag = is_listening(local_port) if isinstance(local_port, int) else False + state = f"{'L' if listening_flag else 'x'}{'A' if accept_flag else '-'}" + parts.append(f"{channel_dir.name}:{state}:{local_port if local_port else '-'}") + return ", ".join(parts) if parts else "-" + + +def fmt_bytes(value: int) -> str: + units = ["B", "KB", "MB", "GB", "TB"] + val = float(value) + idx = 0 + while val >= 1024.0 and idx < len(units) - 1: + val /= 1024.0 + idx += 1 + if idx == 0: + return f"{int(val)}{units[idx]}" + return f"{val:.1f}{units[idx]}" + + +def telemetry_line( + telemetry: Optional[dict], + prev: Optional[dict], +) -> Tuple[str, Optional[dict]]: + if not telemetry: + return "hotlink:-", prev + tx = int(telemetry.get("tx_bytes") or 0) + rx = int(telemetry.get("rx_bytes") or 0) + tx_packets = int(telemetry.get("tx_packets") or 0) + rx_packets = int(telemetry.get("rx_packets") or 0) + mode = str(telemetry.get("mode") or "unknown") + updated_ms = int(telemetry.get("updated_ms") or 0) + delta_tx = 0 + delta_rx = 0 + delta_t = 0 + if prev: + delta_tx = max(0, tx - int(prev.get("tx_bytes") or 0)) + delta_rx = max(0, rx - 
int(prev.get("rx_bytes") or 0)) + delta_t = max(0, updated_ms - int(prev.get("updated_ms") or 0)) + line = ( + f"hotlink:{mode} tx={fmt_bytes(tx)} rx={fmt_bytes(rx)} " + f"pkts={tx_packets}/{rx_packets} +{fmt_bytes(delta_tx)}/{fmt_bytes(delta_rx)}" + ) + if delta_t > 0: + line += f" dt={delta_t}ms" + return line, {"tx_bytes": tx, "rx_bytes": rx, "updated_ms": updated_ms} + + +def snapshot( + prefix: str, + sandbox: Path, + flow: str, + emails: List[str], + prev_tel: Dict[str, dict], + emit_wait: bool, +) -> Tuple[Dict[str, dict], bool]: + session = latest_session(sandbox, flow, emails) + ts = time.strftime("%H:%M:%S") + if session or emit_wait: + print(f"{prefix} {ts} flow={flow} session={session or '-'}") + if not session: + if emit_wait: + print(f"{prefix} waiting for session-* in {sandbox}") + return prev_tel, False + + next_tel: Dict[str, dict] = dict(prev_tel) + for email in emails: + role = role_for_email(email) + session_root = sandbox / email / "datasites" / email / "shared" / "flows" / flow / session + progress_dir = session_root / "_progress" + statuses = parse_step_statuses(progress_dir / "log.jsonl") + steps = [] + for step in STEP_ORDER: + value = statuses.get(step, "-") + if step == "secure_aggregate" and statuses.get("secure_aggregate.proxy") == "Ready": + value = f"{value}+proxy" + steps.append(f"{step}={value}") + channel_text = parse_mpc_channels(session_root, email) + + telemetry_path = sandbox / email / "datasites" / email / ".syftbox" / "hotlink_telemetry.json" + telemetry = read_json(telemetry_path) + telem_text, telem_state = telemetry_line(telemetry, prev_tel.get(email)) + if telem_state: + next_tel[email] = telem_state + + private_log = ( + sandbox + / email + / ".biovault" + / "multiparty_step_logs" + / session + / "secure_aggregate.log" + ) + secure_tail = read_private_step_line(private_log) + print( + f"{prefix} {role:<10} {' '.join(steps)} | {telem_text} | mpc[{channel_text}]" + ) + print(f"{prefix} {role:<10} secure_tail: 
{secure_tail}") + + return next_tel, True + + +def main() -> int: + args = parse_args() + sandbox = Path(args.sandbox).expanduser().resolve() + if args.participants.strip(): + emails = [p.strip() for p in args.participants.split(",") if p.strip()] + else: + emails = discover_participants(sandbox) + + if not emails: + print(f"{args.prefix} no participants found under {sandbox}", file=sys.stderr) + return 1 + + prev_tel: Dict[str, dict] = {} + had_session = False + last_wait_log_at = 0.0 + while True: + now = time.time() + emit_wait = False + if not had_session and (last_wait_log_at <= 0.0 or (now - last_wait_log_at) >= 30.0): + emit_wait = True + prev_tel, has_session = snapshot( + args.prefix, + sandbox, + args.flow, + emails, + prev_tel, + emit_wait, + ) + if emit_wait and not has_session: + last_wait_log_at = now + had_session = has_session + if args.once: + return 0 + time.sleep(max(args.interval, 0.2)) + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/scripts/windows/bundle-deps.ps1 b/scripts/windows/bundle-deps.ps1 index e23bb41d..d394cc84 100644 --- a/scripts/windows/bundle-deps.ps1 +++ b/scripts/windows/bundle-deps.ps1 @@ -239,6 +239,14 @@ New-Item -ItemType Directory -Force -Path $nxfDest | Out-Null Set-Content -Path (Join-Path $javaDest "README.txt") -Value "Java runs via Docker container on Windows" -Encoding UTF8 Set-Content -Path (Join-Path $nxfDest "README.txt") -Value "Nextflow runs via Docker container on Windows" -Encoding UTF8 +# Syqure placeholder (uses Docker container on Windows) +$syqureDest = Join-Path $repoRoot "src-tauri\resources\syqure" +New-Item -ItemType Directory -Force -Path $syqureDest | Out-Null +if (-not (Test-Path (Join-Path $syqureDest "syqure.exe"))) { + Set-Content -Path (Join-Path $syqureDest "syqure.exe") -Value "" -Encoding UTF8 +} +Set-Content -Path (Join-Path $syqureDest "README.txt") -Value "Syqure runs via Docker container on Windows" -Encoding UTF8 + <# # DISABLED: Java bundling - not needed on 
Windows (Docker provides it) Write-Host "== Fetching Java (Temurin JRE) ==" diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock index b5fd8ffe..a74ecc49 100644 --- a/src-tauri/Cargo.lock +++ b/src-tauri/Cargo.lock @@ -237,9 +237,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.37" +version = "0.4.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d10e4f991a553474232bc0a31799f6d24b034a84c0971d80d2e2f78b2e576e40" +checksum = "68650b7df54f0293fd061972a0fb05aaf4fc0879d3b3d21a638a182c5c543b9f" dependencies = [ "compression-codecs", "compression-core", @@ -465,6 +465,28 @@ dependencies = [ "arrayvec", ] +[[package]] +name = "aws-lc-rs" +version = "1.15.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b7b6141e96a8c160799cc2d5adecd5cbbe5054cb8c7c4af53da0f83bb7ad256" +dependencies = [ + "aws-lc-sys", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.37.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c34dda4df7017c8db52132f0f8a2e0f8161649d15723ed63fc00c82d0f2081a" +dependencies = [ + "cc", + "cmake", + "dunce", + "fs_extra", +] + [[package]] name = "axum" version = "0.7.9" @@ -1034,6 +1056,15 @@ version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32" +[[package]] +name = "cmake" +version = "0.1.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" +dependencies = [ + "cc", +] + [[package]] name = "color_quant" version = "1.1.0" @@ -1901,6 +1932,18 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" +[[package]] +name = "fastbloom" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "4e7f34442dbe69c60fe8eaf58a8cafff81a1f278816d8ab4db255b3bef4ac3c4" +dependencies = [ + "getrandom 0.3.4", + "libm", + "rand 0.9.2", + "siphasher 1.0.2", +] + [[package]] name = "fastrand" version = "2.3.0" @@ -2042,6 +2085,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "fsevent-sys" version = "4.1.0" @@ -3546,6 +3595,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "libm" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" + [[package]] name = "libredox" version = "0.1.12" @@ -3755,9 +3810,9 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" [[package]] name = "memchr" -version = "2.7.6" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" [[package]] name = "memoffset" @@ -4576,6 +4631,16 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df94ce210e5bc13cb6651479fa48d14f601d9858cfe0467f43ae157023b938d3" +[[package]] +name = "pem" +version = "3.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d30c53c26bc5b31a98cd02d20f25a7c8567146caf63ed593a9d87b2775291be" +dependencies = [ + "base64 0.22.1", + "serde_core", +] + [[package]] name = "percent-encoding" version = "2.3.2" @@ -5146,6 +5211,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" dependencies = [ "bytes", + "fastbloom", "getrandom 0.3.4", "lru-slab", "rand 0.9.2", @@ -5153,6 
+5219,7 @@ dependencies = [ "rustc-hash", "rustls 0.23.36", "rustls-pki-types", + "rustls-platform-verifier", "slab", "thiserror 2.0.18", "tinyvec", @@ -5375,6 +5442,19 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "rcgen" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75e669e5202259b5314d1ea5397316ad400819437857b90861765f24c4cf80a2" +dependencies = [ + "pem", + "ring", + "rustls-pki-types", + "time", + "yasna", +] + [[package]] name = "redox_syscall" version = "0.5.18" @@ -5508,16 +5588,16 @@ dependencies = [ "url", "wasm-bindgen", "wasm-bindgen-futures", - "wasm-streams", + "wasm-streams 0.4.2", "web-sys", "webpki-roots 1.0.6", ] [[package]] name = "reqwest" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04e9018c9d814e5f30cc16a0f03271aeab3571e609612d9fe78c1aa8d11c2f62" +checksum = "ab3f43e3283ab1488b624b44b0e988d0acea0b3214e694730a055cb6b2efa801" dependencies = [ "base64 0.22.1", "bytes", @@ -5548,7 +5628,7 @@ dependencies = [ "url", "wasm-bindgen", "wasm-bindgen-futures", - "wasm-streams", + "wasm-streams 0.5.0", "web-sys", ] @@ -5701,6 +5781,7 @@ version = "0.23.36" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" dependencies = [ + "aws-lc-rs", "once_cell", "ring", "rustls-pki-types", @@ -5775,6 +5856,7 @@ version = "0.103.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" dependencies = [ + "aws-lc-rs", "ring", "rustls-pki-types", "untrusted", @@ -5788,9 +5870,9 @@ checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "ryu" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" +checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" [[package]] name = "ryu-js" @@ -6515,10 +6597,13 @@ dependencies = [ "md5", "notify", "once_cell", + "quinn", + "rcgen", "regex", "reqwest 0.12.28", "rmp-serde", "rusqlite", + "rustls 0.23.36", "serde", "serde_json", "serde_yaml", @@ -6533,7 +6618,7 @@ dependencies = [ [[package]] name = "syftbox-sdk" -version = "0.1.17" +version = "0.1.18" dependencies = [ "anyhow", "base64 0.22.1", @@ -6743,7 +6828,7 @@ dependencies = [ "percent-encoding", "plist", "raw-window-handle", - "reqwest 0.13.1", + "reqwest 0.13.2", "serde", "serde_json", "serde_repr", @@ -7007,7 +7092,7 @@ dependencies = [ "minisign-verify", "osakit", "percent-encoding", - "reqwest 0.13.1", + "reqwest 0.13.2", "rustls 0.23.36", "semver", "serde", @@ -7852,9 +7937,9 @@ checksum = "dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142" [[package]] name = "unicode-ident" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" +checksum = "537dd038a89878be9b64dd4bd1b260315c1bb94f4d784956b81e27a088d9a09e" [[package]] name = "unicode-normalization" @@ -8127,6 +8212,19 @@ dependencies = [ "web-sys", ] +[[package]] +name = "wasm-streams" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d1ec4f6517c9e11ae630e200b2b65d193279042e28edd4a2cda233e46670bbb" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "web-sys" version = "0.3.85" @@ -8980,6 +9078,15 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5a4b21e1a62b67a2970e6831bc091d7b87e119e7f9791aef9702e3bef04448" +[[package]] +name = "yasna" +version = "0.5.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" +dependencies = [ + "time", +] + [[package]] name = "yoke" version = "0.8.1" @@ -9066,18 +9173,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.38" +version = "0.8.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57cf3aa6855b23711ee9852dfc97dfaa51c45feaba5b645d0c777414d494a961" +checksum = "db6d35d663eadb6c932438e763b262fe1a70987f9ae936e60158176d710cae4a" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.38" +version = "0.8.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a616990af1a287837c4fe6596ad77ef57948f787e46ce28e166facc0cc1cb75" +checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" dependencies = [ "proc-macro2", "quote", @@ -9183,9 +9290,9 @@ dependencies = [ [[package]] name = "zmij" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ff05f8caa9038894637571ae6b9e29466c1f4f829d26c9b28f869a29cbe3445" +checksum = "4de98dfa5d5b7fef4ee834d0073d560c9ca7b6c46a71d058c48db7960f8cfaf7" [[package]] name = "zune-core" diff --git a/src-tauri/build.rs b/src-tauri/build.rs index 3383f3ae..f5f732c9 100644 --- a/src-tauri/build.rs +++ b/src-tauri/build.rs @@ -127,10 +127,16 @@ fn main() { let manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR must be set")); - // Ensure bundled resources directory exists so glob in tauri.conf.json always matches. + // Ensure bundled resources directories exist so glob in tauri.conf.json always matches. 
let bundled_dir = manifest_dir.join("resources").join("bundled"); ensure_placeholder_dir(&bundled_dir, "placeholder.txt") .expect("failed to ensure resources/bundled"); + ensure_placeholder_dir(&bundled_dir.join("java"), "placeholder.txt") + .expect("failed to ensure resources/bundled/java"); + ensure_placeholder_dir(&bundled_dir.join("nextflow"), "placeholder.txt") + .expect("failed to ensure resources/bundled/nextflow"); + ensure_placeholder_dir(&bundled_dir.join("uv"), "placeholder.txt") + .expect("failed to ensure resources/bundled/uv"); let syqure_dir = manifest_dir.join("resources").join("syqure"); ensure_placeholder_dir(&syqure_dir, "placeholder.txt") .expect("failed to ensure resources/syqure"); diff --git a/src-tauri/src/commands/flows.rs b/src-tauri/src/commands/flows.rs index 480b113f..3b9d1635 100644 --- a/src-tauri/src/commands/flows.rs +++ b/src-tauri/src/commands/flows.rs @@ -13,7 +13,6 @@ use walkdir::WalkDir; // Use CLI library types and functions use biovault::cli::commands::flow::run_flow as cli_run_flow; -use biovault::cli::commands::module_management::{resolve_flow_dependencies, DependencyContext}; use biovault::data::BioVaultDb; pub use biovault::data::{Flow, Run, RunConfig}; pub use biovault::flow_spec::FlowSpec; @@ -295,6 +294,50 @@ fn clear_nextflow_locks( Ok(total_removed) } +fn copy_local_flow_dir(src: &Path, dest: &Path) -> Result<(), String> { + fs::create_dir_all(dest).map_err(|e| format!("Failed to create destination: {}", e))?; + + for entry in WalkDir::new(src) + .min_depth(1) + .follow_links(false) + .into_iter() + .filter_map(|e| e.ok()) + { + let path = entry.path(); + let rel = path + .strip_prefix(src) + .map_err(|e| format!("Failed to resolve path: {}", e))?; + + if should_skip_request_path(rel) { + continue; + } + + let dest_path = dest.join(rel); + if entry.file_type().is_dir() { + fs::create_dir_all(&dest_path).map_err(|e| { + format!("Failed to create directory {}: {}", dest_path.display(), e) + })?; + continue; + } + + if 
let Some(parent) = dest_path.parent() { + fs::create_dir_all(parent) + .map_err(|e| format!("Failed to create directory {}: {}", parent.display(), e))?; + } + + fs::copy(path, &dest_path).map_err(|e| { + format!( + "Failed to copy {} to {}: {}", + path.display(), + dest_path.display(), + e + ) + })?; + } + + Ok(()) +} + fn list_nextflow_locks(flow_path: &Path) -> Vec { let nextflow_dir = flow_path.join(".nextflow"); if !nextflow_dir.exists() { @@ -1341,7 +1384,6 @@ pub async fn create_flow( } } - let mut flow_yaml_path = flow_dir.join(FLOW_YAML_FILE); let mut imported_spec: Option = None; // If importing from a file, always copy to managed directory (like GitHub imports) @@ -1392,43 +1434,12 @@ pub async fn create_flow( .map_err(|e| format!("Failed to create flow directory: {}", e))?; flow_dir = managed_flow_dir.clone(); - flow_yaml_path = managed_flow_dir.join(FLOW_YAML_FILE); - - // Resolve and import dependencies - // Use spawn_blocking because BioVaultDb is not Send - // base_path is the directory containing flow.yaml (where module.yaml might also be) - let dependency_context = DependencyContext::Local { - base_path: source_parent.to_path_buf(), // This is already the directory containing flow.yaml - }; - let flow_yaml_path_clone = flow_yaml_path.clone(); - - let flow_result = tauri::async_runtime::spawn_blocking(move || { - tauri::async_runtime::block_on(async { - let spec = FlowFile::parse_yaml(&yaml_str) - .map_err(|e| format!("Failed to parse flow.yaml: {}", e))?; - let mut spec = spec - .to_flow_spec() - .map_err(|e| format!("Failed to convert flow spec: {}", e))?; - resolve_flow_dependencies( - &mut spec, - &dependency_context, - &flow_yaml_path_clone, - overwrite, - true, // quiet = true for Tauri (no console output) - ) - .await - .map_err(|e| e.to_string())?; - Ok::(spec) - }) - }) - .await - .map_err(|e| format!("Failed to spawn dependency resolution: {}", e))?; - - let spec = flow_result.map_err(|e| format!("Failed to resolve dependencies: {}", e))?; 
- // Note: resolve_flow_dependencies already saves the spec (with description preserved) - imported_spec = Some(spec); + // Preserve full flow directory contents (including local modules/assets). + copy_local_flow_dir(source_parent, &managed_flow_dir)?; + imported_spec = flow.to_flow_spec().ok(); } else { + let flow_yaml_path = flow_dir.join(FLOW_YAML_FILE); fs::create_dir_all(&flow_dir) .map_err(|e| format!("Failed to create flow directory: {}", e))?; @@ -1462,6 +1473,8 @@ pub async fn create_flow( let default_spec = FlowSpec { name: name.clone(), description: None, + multiparty: None, + roles: Vec::new(), context: None, vars: Default::default(), coordination: None, @@ -1528,6 +1541,106 @@ pub async fn create_flow( }) } +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ImportFlowFromJsonRequest { + pub name: String, + pub flow_json: serde_json::Value, + #[serde(default)] + pub overwrite: bool, +} + +#[tauri::command] +pub async fn import_flow_from_json( + state: tauri::State<'_, AppState>, + request: ImportFlowFromJsonRequest, +) -> Result { + let ImportFlowFromJsonRequest { + name, + flow_json, + overwrite, + } = request; + + let flows_dir = get_flows_dir()?; + fs::create_dir_all(&flows_dir) + .map_err(|e| format!("Failed to create flows directory: {}", e))?; + + let flow_dir = flows_dir.join(&name); + + // Always allow overwrite for invitation imports - check DB first + let biovault_db = state.biovault_db.lock().map_err(|e| e.to_string())?; + let flow_dir_str = flow_dir.to_string_lossy().to_string(); + + // Check if flow already exists in DB + let existing = biovault_db + .list_flows() + .map_err(|e| e.to_string())? 
+ .into_iter() + .find(|p| p.name == name || p.flow_path == flow_dir_str); + + if let Some(existing_flow) = existing { + // Flow already exists - return it (no need to re-import) + if !overwrite { + return Ok(existing_flow); + } + // Delete existing for overwrite + biovault_db + .delete_flow(existing_flow.id) + .map_err(|e| e.to_string())?; + } + + if flow_dir.exists() && overwrite { + fs::remove_dir_all(&flow_dir) + .map_err(|e| format!("Failed to remove existing flow directory: {}", e))?; + } + + fs::create_dir_all(&flow_dir).map_err(|e| format!("Failed to create flow directory: {}", e))?; + + // The flow_json might be a Flow object (from get_flows) or a FlowFile + // Try to extract the spec and build a proper FlowFile + let flow_file: FlowFile = if flow_json.get("apiVersion").is_some() { + // It's already a FlowFile format + serde_json::from_value(flow_json.clone()) + .map_err(|e| format!("Failed to parse FlowFile JSON: {}", e))? + } else if let Some(spec_value) = flow_json.get("spec") { + // It's a Flow object with a spec field - reconstruct FlowFile + let spec: FlowSpec = serde_json::from_value(spec_value.clone()) + .map_err(|e| format!("Failed to parse FlowSpec: {}", e))?; + FlowFile::from_flow_spec(&spec) + .map_err(|e| format!("Failed to build FlowFile from spec: {}", e))? + } else { + // Try to parse as FlowSpec directly + let spec: FlowSpec = serde_json::from_value(flow_json.clone()) + .map_err(|e| format!("Failed to parse as FlowSpec: {}", e))?; + FlowFile::from_flow_spec(&spec) + .map_err(|e| format!("Failed to build FlowFile from spec: {}", e))? 
+ }; + + let yaml_content = serde_yaml::to_string(&flow_file) + .map_err(|e| format!("Failed to convert flow to YAML: {}", e))?; + + let flow_yaml_path = flow_dir.join(FLOW_YAML_FILE); + fs::write(&flow_yaml_path, &yaml_content) + .map_err(|e| format!("Failed to write flow.yaml: {}", e))?; + + // Parse spec for return value + let imported_spec = flow_file.to_flow_spec().ok(); + + let id = biovault_db + .register_flow(&name, &flow_dir_str) + .map_err(|e| e.to_string())?; + + let timestamp = chrono::Local::now().to_rfc3339(); + + Ok(Flow { + id, + name, + flow_path: flow_dir_str, + created_at: timestamp.clone(), + updated_at: timestamp, + spec: imported_spec, + }) +} + #[tauri::command] pub async fn load_flow_editor( state: tauri::State<'_, AppState>, @@ -2609,7 +2722,7 @@ pub async fn get_flow_runs(state: tauri::State<'_, AppState>) -> Result pub async fn delete_flow_run(state: tauri::State<'_, AppState>, run_id: i64) -> Result<(), String> { let biovault_db = state.biovault_db.lock().map_err(|e| e.to_string())?; - // Get work directory before deleting + // Get run details before deleting let run = biovault_db .get_flow_run(run_id) .map_err(|e| e.to_string())?; @@ -2619,11 +2732,20 @@ pub async fn delete_flow_run(state: tauri::State<'_, AppState>, run_id: i64) -> .delete_flow_run(run_id) .map_err(|e| e.to_string())?; - // Delete work directory if it exists if let Some(r) = run { + // Clear multiparty session so the invitation can be re-accepted from messages + if let Some(ref metadata_str) = r.metadata { + if let Ok(metadata) = serde_json::from_str::(metadata_str) { + if let Some(sid) = metadata.get("session_id").and_then(|v| v.as_str()) { + super::multiparty::clear_multiparty_session(sid); + } + } + } + + // Delete work directory if it exists let path = PathBuf::from(r.work_dir); if path.exists() { - fs::remove_dir_all(&path).ok(); // Ignore errors here + fs::remove_dir_all(&path).ok(); } } @@ -2652,6 +2774,16 @@ pub async fn reconcile_flow_runs(state: 
tauri::State<'_, AppState>) -> Result<() if run.status != "running" && run.status != "paused" { continue; } + + // Skip multiparty runs - they don't have a process to track + if let Some(ref metadata_str) = run.metadata { + if let Ok(metadata) = serde_json::from_str::(metadata_str) { + if metadata.get("type").and_then(|v| v.as_str()) == Some("multiparty") { + continue; + } + } + } + let results_dir = run .results_dir .as_ref() diff --git a/src-tauri/src/commands/messages.rs b/src-tauri/src/commands/messages.rs index 1fe919f6..afaee5bf 100644 --- a/src-tauri/src/commands/messages.rs +++ b/src-tauri/src/commands/messages.rs @@ -4,6 +4,7 @@ use crate::types::{ }; use biovault::cli::commands::messages::{get_message_db_path, init_message_system}; use biovault::flow_spec::FlowFile; +use biovault::flow_spec::FlowModuleDef; use biovault::flow_spec::FlowSpec; use biovault::messages::{Message as VaultMessage, MessageDb, MessageStatus, MessageType}; use biovault::syftbox::storage::{SyftBoxStorage, WritePolicy}; @@ -36,6 +37,26 @@ fn parse_thread_filter(scope: Option<&str>) -> Result, + participants: &mut HashSet, +) { + let Some(meta) = metadata else { return }; + let Some(group_chat) = meta.get("group_chat") else { + return; + }; + let Some(group_participants) = group_chat.get("participants").and_then(|p| p.as_array()) else { + return; + }; + for email in group_participants { + if let Some(email) = email.as_str() { + if !email.trim().is_empty() { + participants.insert(email.trim().to_string()); + } + } + } +} + fn syftbox_storage(config: &biovault::config::Config) -> Result { let data_dir = config .get_syftbox_data_dir() @@ -126,17 +147,43 @@ fn copy_flow_folder( } fn collect_flow_modules( + flow_file: &FlowFile, spec: &FlowSpec, flow_root: &Path, db: &biovault::data::BioVaultDb, ) -> Result, String> { let mut modules = HashSet::new(); + let mut explicit_local_module_names = HashSet::new(); + + for (module_name, module_def) in &flow_file.spec.modules { + if let 
FlowModuleDef::Ref(module_ref) = module_def { + if let Some(source) = &module_ref.source { + if let Some(source_path) = source.path.as_ref().map(|p| p.trim()) { + if !source_path.is_empty() { + let candidate = if source_path.starts_with('/') { + PathBuf::from(source_path) + } else { + flow_root.join(source_path) + }; + if candidate.exists() { + explicit_local_module_names.insert(module_name.clone()); + modules.insert(candidate); + } + } + } + } + } + } for step in &spec.steps { let Some(uses) = step.uses.as_ref() else { continue; }; + if explicit_local_module_names.contains(uses) { + continue; + } + if uses.starts_with("http://") || uses.starts_with("https://") || uses.starts_with("syft://") @@ -476,6 +523,7 @@ pub fn list_message_threads( if !msg.to.is_empty() { participants.insert(msg.to.clone()); } + add_group_chat_participants(&msg.metadata, &mut participants); } let subject = last_msg @@ -563,6 +611,17 @@ pub fn get_thread_messages(thread_id: String) -> Result, Strin Ok(messages) } +/// Generate a deterministic thread ID for group chats based on sorted participants +fn generate_group_thread_id(participants: &[String]) -> String { + let mut sorted: Vec<&str> = participants.iter().map(|s| s.as_str()).collect(); + sorted.sort(); + let joined = sorted.join(","); + let mut hasher = Sha256::new(); + hasher.update(joined.as_bytes()); + let hash = hex::encode(hasher.finalize()); + format!("group-{}", &hash[..16]) +} + #[tauri::command] pub fn send_message(request: MessageSendRequest) -> Result { if request.body.trim().is_empty() { @@ -573,6 +632,104 @@ pub fn send_message(request: MessageSendRequest) -> Result let (db, sync) = init_message_system(&config) .map_err(|e| format!("Failed to initialize messaging: {}", e))?; + // Check if this is a group message (multiple recipients) + let recipients: Vec = request + .recipients + .as_ref() + .map(|r| { + r.iter() + .map(|s| s.trim().to_string()) + .filter(|s| !s.is_empty()) + .collect() + }) + .unwrap_or_default(); + + // 
If we have multiple recipients, handle as group message + if recipients.len() > 1 || (recipients.len() == 1 && request.to.is_none()) { + let recipients = if recipients.is_empty() { + // Fall back to single `to` if recipients is empty + vec![request + .to + .clone() + .filter(|s| !s.trim().is_empty()) + .ok_or_else(|| "At least one recipient is required".to_string())?] + } else { + recipients + }; + + // Build the full participant list (sender + all recipients) + let mut all_participants: Vec = recipients.clone(); + all_participants.push(config.email.clone()); + all_participants.sort(); + all_participants.dedup(); + + // Generate deterministic thread ID for group + let group_thread_id = generate_group_thread_id(&all_participants); + + // Prepare metadata with group info + let mut base_metadata = request.metadata.clone().unwrap_or(serde_json::json!({})); + if let Some(obj) = base_metadata.as_object_mut() { + obj.insert( + "group_chat".to_string(), + serde_json::json!({ + "participants": all_participants, + "is_group": true, + }), + ); + } + + let mut first_message: Option = None; + + // Send to each recipient + for recipient in &recipients { + let mut message = VaultMessage::new( + config.email.clone(), + recipient.clone(), + request.body.clone(), + ); + + if let Some(subject) = request.subject.as_ref().filter(|s| !s.trim().is_empty()) { + message.subject = Some(subject.clone()); + } + + message.thread_id = Some(group_thread_id.clone()); + message.metadata = Some(base_metadata.clone()); + + if let Some(kind) = request + .message_type + .as_ref() + .map(|s| s.trim().to_lowercase()) + { + use biovault::messages::MessageType; + match kind.as_str() { + "text" | "" => { + message.message_type = MessageType::Text; + } + _ => { + message.message_type = MessageType::Text; + } + } + } + + db.insert_message(&message) + .map_err(|e| format!("Failed to store message: {}", e))?; + + sync.send_message(&message.id) + .map_err(|e| format!("Failed to send message to {}: {}", 
recipient, e))?; + + if first_message.is_none() { + let updated = db + .get_message(&message.id) + .map_err(|e| format!("Failed to reload message: {}", e))? + .unwrap_or(message); + first_message = Some(updated); + } + } + + return Ok(first_message.unwrap()); + } + + // Single recipient flow (existing logic) let mut message = if let Some(reply_id) = request.reply_to.as_ref() { let original = db .get_message(reply_id) @@ -580,6 +737,73 @@ pub fn send_message(request: MessageSendRequest) -> Result .ok_or_else(|| format!("Original message not found: {}", reply_id))?; let mut reply = VaultMessage::reply_to(&original, config.email.clone(), request.body.clone()); + + // For group chat replies, send to all participants except self + if let Some(meta) = original.metadata.as_ref() { + if let Some(group_chat) = meta.get("group_chat") { + if let Some(participants) = + group_chat.get("participants").and_then(|p| p.as_array()) + { + let other_participants: Vec = participants + .iter() + .filter_map(|p| p.as_str()) + .map(|s| s.to_string()) + .filter(|p| p != &config.email) + .collect(); + + if other_participants.len() > 1 { + // This is a group reply - send to all others + let group_thread_id = original.thread_id.clone().unwrap_or_else(|| { + let mut all: Vec = participants + .iter() + .filter_map(|p| p.as_str()) + .map(|s| s.to_string()) + .collect(); + all.sort(); + generate_group_thread_id(&all) + }); + + let mut base_metadata = + request.metadata.clone().unwrap_or(serde_json::json!({})); + if let Some(obj) = base_metadata.as_object_mut() { + obj.insert("group_chat".to_string(), group_chat.clone()); + } + + let mut first_message: Option = None; + + for recipient in &other_participants { + let mut msg = VaultMessage::new( + config.email.clone(), + recipient.clone(), + request.body.clone(), + ); + msg.thread_id = Some(group_thread_id.clone()); + msg.parent_id = Some(original.id.clone()); + msg.subject = original.subject.clone(); + msg.metadata = Some(base_metadata.clone()); + 
+ db.insert_message(&msg) + .map_err(|e| format!("Failed to store message: {}", e))?; + + sync.send_message(&msg.id).map_err(|e| { + format!("Failed to send message to {}: {}", recipient, e) + })?; + + if first_message.is_none() { + let updated = db + .get_message(&msg.id) + .map_err(|e| format!("Failed to reload message: {}", e))? + .unwrap_or(msg); + first_message = Some(updated); + } + } + + return Ok(first_message.unwrap()); + } + } + } + } + // Allow callers to override the recipient even when sending a reply. // This is important for "threaded" app messages (e.g. session chat/accept/reject) // where we want to reply in-thread but still direct the message to the peer. @@ -928,11 +1152,19 @@ pub fn send_flow_request( .map_err(|e| format!("Failed to resolve submissions folder: {}", e))?; let timestamp = chrono::Local::now().format("%Y%m%d_%H%M%S").to_string(); + let unique_ms = Utc::now().timestamp_millis(); + let recipient_slug: String = recipient + .chars() + .map(|ch| if ch.is_ascii_alphanumeric() { ch } else { '-' }) + .collect(); let mut hasher = Sha256::new(); hasher.update(flow_content.as_bytes()); let flow_hash = hex::encode(hasher.finalize()); let short_hash = flow_hash.get(0..8).unwrap_or(&flow_hash).to_string(); - let submission_folder_name = format!("{}-{}-{}", flow_name, timestamp, short_hash); + let submission_folder_name = format!( + "{}-{}-{}-{}-{}", + flow_name, timestamp, short_hash, recipient_slug, unique_ms + ); let submission_path = submission_root.join(&submission_folder_name); copy_flow_folder( @@ -942,8 +1174,12 @@ pub fn send_flow_request( &recipient, )?; - let module_paths = - collect_flow_modules(&flow_spec_struct, Path::new(&flow.flow_path), &biovault_db)?; + let module_paths = collect_flow_modules( + &flow_file, + &flow_spec_struct, + Path::new(&flow.flow_path), + &biovault_db, + )?; let modules_dest_root = submission_path.join("modules"); let mut included_modules: Vec = Vec::new(); let mut seen_module_dirs = HashSet::new(); @@ 
-1571,6 +1807,7 @@ pub fn refresh_messages_batched( if !msg.to.is_empty() { participants.insert(msg.to.clone()); } + add_group_chat_participants(&msg.metadata, &mut participants); } let subject = last_msg diff --git a/src-tauri/src/commands/mod.rs b/src-tauri/src/commands/mod.rs index 8e2fab18..bcb8f385 100644 --- a/src-tauri/src/commands/mod.rs +++ b/src-tauri/src/commands/mod.rs @@ -8,6 +8,7 @@ pub mod key; pub mod logs; pub mod messages; pub mod modules; +pub mod multiparty; pub mod notifications; pub mod participants; pub mod profiles; diff --git a/src-tauri/src/commands/multiparty.rs b/src-tauri/src/commands/multiparty.rs new file mode 100644 index 00000000..c2e70a1d --- /dev/null +++ b/src-tauri/src/commands/multiparty.rs @@ -0,0 +1,5402 @@ +use crate::types::AppState; +use biovault::cli::commands::run_dynamic; +use biovault::messages::models::{FlowParticipant, MessageType}; +use biovault::subscriptions; +use chrono::{TimeZone, Utc}; +use once_cell::sync::Lazy; +use serde::{Deserialize, Serialize}; +use std::collections::{BTreeSet, HashMap, HashSet}; +use std::env; +use std::fs; +use std::io::{BufRead, BufReader, Write}; +use std::net::{Ipv4Addr, SocketAddrV4, TcpStream}; +use std::path::{Path, PathBuf}; + +use std::sync::Mutex; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; + +const SEQURE_COMMUNICATION_PORT_STRIDE: usize = 1000; + +fn flow_spec_root(flow_spec: &serde_json::Value) -> &serde_json::Value { + flow_spec.get("spec").unwrap_or(flow_spec) +} + +/// Get the owner's email from config +fn get_owner_email() -> Result { + let config = + biovault::config::Config::load().map_err(|e| format!("Failed to load config: {}", e))?; + Ok(config.email) +} + +/// Get the shared flow path for multiparty sessions +/// Structure: {biovault_home}/datasites/{owner}/shared/flows/{flow_name}/{session_id}/ +fn get_shared_flow_path(flow_name: &str, session_id: &str) -> Result { + let biovault_home = biovault::config::get_biovault_home() + .map_err(|e| format!("Failed 
to get BioVault home: {}", e))?; + let owner = get_owner_email()?; + + Ok(biovault_home + .join("datasites") + .join(&owner) + .join("shared") + .join("flows") + .join(flow_name) + .join(session_id)) +} + +fn load_multiparty_state_from_disk( + session_id: &str, +) -> Result, String> { + let biovault_home = biovault::config::get_biovault_home() + .map_err(|e| format!("Failed to get BioVault home: {}", e))?; + let owner = get_owner_email()?; + let flows_root = biovault_home + .join("datasites") + .join(&owner) + .join("shared") + .join("flows"); + + if !flows_root.exists() { + return Ok(None); + } + + let flow_dirs = fs::read_dir(&flows_root) + .map_err(|e| format!("Failed to read flows root {}: {}", flows_root.display(), e))?; + + for flow_entry in flow_dirs.flatten() { + let flow_dir = flow_entry.path(); + if !flow_dir.is_dir() { + continue; + } + let session_dir = flow_dir.join(session_id); + if !session_dir.is_dir() { + continue; + } + let state_path = session_dir.join("multiparty.state.json"); + if !state_path.exists() { + continue; + } + + let raw = fs::read_to_string(&state_path) + .map_err(|e| format!("Failed to read {}: {}", state_path.display(), e))?; + let mut parsed: MultipartyFlowState = serde_json::from_str(&raw) + .map_err(|e| format!("Failed to parse {}: {}", state_path.display(), e))?; + + // Ensure work_dir is valid after app restarts / path migrations. 
+ parsed.work_dir = Some(session_dir); + return Ok(Some(parsed)); + } + + Ok(None) +} + +fn state_file_for_flow(flow_state: &MultipartyFlowState) -> Result { + if let Some(work_dir) = flow_state.work_dir.as_ref() { + return Ok(work_dir.join("multiparty.state.json")); + } + let flow_path = get_shared_flow_path(&flow_state.flow_name, &flow_state.session_id)?; + Ok(flow_path.join("multiparty.state.json")) +} + +fn persist_multiparty_state(flow_state: &MultipartyFlowState) -> Result<(), String> { + let state_path = state_file_for_flow(flow_state)?; + if let Some(parent) = state_path.parent() { + fs::create_dir_all(parent).map_err(|e| { + format!( + "Failed to create state directory {}: {}", + parent.display(), + e + ) + })?; + } + let state_json = serde_json::to_string_pretty(flow_state) + .map_err(|e| format!("Failed to serialize state: {}", e))?; + fs::write(&state_path, state_json) + .map_err(|e| format!("Failed to write state file {}: {}", state_path.display(), e))?; + Ok(()) +} + +fn ensure_flow_subscriptions( + flow_name: &str, + session_id: &str, + participant_emails: &[String], +) -> Result<(), String> { + let config = + biovault::config::Config::load().map_err(|e| format!("Failed to load config: {}", e))?; + let my_email = config.email.clone(); + let data_dir = config + .get_syftbox_data_dir() + .map_err(|e| format!("Failed to resolve SyftBox data dir: {}", e))?; + let syftsub_path = data_dir.join(".data").join("syft.sub.yaml"); + + let mut cfg = + subscriptions::load(&syftsub_path).unwrap_or_else(|_| subscriptions::default_config()); + let run_path = format!("shared/flows/{}/{}", flow_name, session_id); + let mut changed = false; + + for peer in participant_emails { + if peer.trim().is_empty() || peer.eq_ignore_ascii_case(&my_email) { + continue; + } + + let rule = subscriptions::Rule { + action: subscriptions::Action::Allow, + datasite: Some(peer.clone()), + path: format!("{}/**", run_path), + }; + + let exists = cfg.rules.iter().any(|existing| { + 
existing.action == rule.action + && existing + .datasite + .as_deref() + .map(|ds| ds.eq_ignore_ascii_case(peer)) + .unwrap_or(false) + && existing.path == rule.path + }); + + if !exists { + cfg.rules.push(rule); + changed = true; + } + } + + if changed { + subscriptions::save(&syftsub_path, &cfg) + .map_err(|e| format!("Failed to write syft.sub.yaml: {}", e))?; + } + + Ok(()) +} + +fn copy_dir_recursive(src: &Path, dst: &Path) -> Result<(), String> { + fs::create_dir_all(dst) + .map_err(|e| format!("Failed to create destination {}: {}", dst.display(), e))?; + for entry in fs::read_dir(src).map_err(|e| format!("Failed to read {}: {}", src.display(), e))? { + let entry = entry.map_err(|e| format!("Failed to read directory entry: {}", e))?; + let src_path = entry.path(); + let dst_path = dst.join(entry.file_name()); + if src_path.is_dir() { + copy_dir_recursive(&src_path, &dst_path)?; + } else if src_path.is_file() { + if let Some(parent) = dst_path.parent() { + fs::create_dir_all(parent) + .map_err(|e| format!("Failed to create directory {}: {}", parent.display(), e))?; + } + fs::copy(&src_path, &dst_path).map_err(|e| { + format!( + "Failed to copy {} -> {}: {}", + src_path.display(), + dst_path.display(), + e + ) + })?; + } + } + Ok(()) +} + +fn publish_flow_source_for_session( + flow_name: &str, + work_dir: &Path, + owner_email: &str, + participant_emails: &[String], + flow_spec: &serde_json::Value, +) -> Result, String> { + let biovault_home = biovault::config::get_biovault_home() + .map_err(|e| format!("Failed to get BioVault home: {}", e))?; + let source_flow_dir = biovault_home.join("flows").join(flow_name); + if !source_flow_dir.exists() { + return Ok(None); + } + + let dest_flow_dir = work_dir.join("_flow_source"); + if dest_flow_dir.exists() { + fs::remove_dir_all(&dest_flow_dir).map_err(|e| { + format!( + "Failed to clear existing flow source {}: {}", + dest_flow_dir.display(), + e + ) + })?; + } + copy_dir_recursive(&source_flow_dir, &dest_flow_dir)?; + + 
// Ensure all referenced modules are bundled into _flow_source/modules, + // even when the source flow points to modules outside flows//modules. + let spec_root = flow_spec_root(flow_spec); + if let Some(steps) = spec_root.get("steps").and_then(|s| s.as_array()) { + for step in steps { + let module_ref = step.get("uses").and_then(|v| v.as_str()); + let module_path = module_ref.and_then(|module_id| { + spec_root + .get("modules") + .and_then(|m| m.get(module_id)) + .and_then(|m| m.get("source")) + .and_then(|s| s.get("path")) + .and_then(|p| p.as_str()) + }); + + let Some(src_module_dir) = + resolve_module_directory(flow_name, module_path, module_ref) + else { + continue; + }; + + let module_name = src_module_dir + .file_name() + .and_then(|n| n.to_str()) + .map(|s| s.to_string()) + .or_else(|| { + module_path + .and_then(|p| PathBuf::from(p).file_name().map(|n| n.to_string_lossy().to_string())) + }) + .or_else(|| { + module_ref + .map(|r| r.trim_start_matches("./").trim_start_matches("modules/").replace('_', "-")) + }) + .unwrap_or_else(|| "module".to_string()); + + let dest_module_dir = dest_flow_dir.join("modules").join(module_name); + if !dest_module_dir.exists() { + copy_dir_recursive(&src_module_dir, &dest_module_dir)?; + } + } + } + + create_syft_pub_yaml(&dest_flow_dir, owner_email, participant_emails)?; + Ok(Some(dest_flow_dir)) +} + +/// Get the step output path within a shared flow +/// Structure: {flow_path}/{step_number}-{step_id}/ +fn get_step_path(flow_path: &PathBuf, step_number: usize, step_id: &str) -> PathBuf { + flow_path.join(format!("{}-{}", step_number, step_id)) +} + +fn get_padded_step_path(flow_path: &PathBuf, step_number: usize, step_id: &str) -> PathBuf { + flow_path.join(format!("{:02}-{}", step_number, step_id)) +} + +fn merge_directory_missing_entries(source_dir: &Path, target_dir: &Path) -> Result<(), String> { + if !source_dir.exists() { + return Ok(()); + } + fs::create_dir_all(target_dir).map_err(|e| { + format!( + "Failed to 
create merge target {}: {}", + target_dir.display(), + e + ) + })?; + + for entry in fs::read_dir(source_dir).map_err(|e| { + format!( + "Failed to read merge source {}: {}", + source_dir.display(), + e + ) + })? { + let entry = entry.map_err(|e| format!("Failed to read merge entry: {}", e))?; + let src_path = entry.path(); + let dst_path = target_dir.join(entry.file_name()); + + if src_path.is_dir() { + merge_directory_missing_entries(&src_path, &dst_path)?; + continue; + } + + if !dst_path.exists() { + if let Some(parent) = dst_path.parent() { + fs::create_dir_all(parent).map_err(|e| { + format!( + "Failed to create merge destination parent {}: {}", + parent.display(), + e + ) + })?; + } + fs::copy(&src_path, &dst_path).map_err(|e| { + format!( + "Failed to copy {} to {}: {}", + src_path.display(), + dst_path.display(), + e + ) + })?; + } + } + + Ok(()) +} + +fn canonicalize_step_dir_name(flow_path: &PathBuf, step_number: usize, step_id: &str) -> PathBuf { + let canonical = get_step_path(flow_path, step_number, step_id); + let padded = get_padded_step_path(flow_path, step_number, step_id); + + // If both exist, reconcile mixed historical layouts by merging padded-only files. + if canonical.exists() && padded.exists() { + // Reconcile mixed historical layouts by merging padded-only files. + let _ = merge_directory_missing_entries(&padded, &canonical); + let _ = fs::remove_dir_all(&padded); + return canonical; + } + + // Preserve existing naming style to avoid creating duplicate step folders. 
+ if canonical.exists() { + canonical + } else if padded.exists() { + padded + } else { + canonical + } +} + +fn reconcile_local_step_dirs(flow_state: &MultipartyFlowState) { + let Some(work_dir) = flow_state.work_dir.as_ref() else { + return; + }; + for (idx, step) in flow_state.steps.iter().enumerate() { + let _ = canonicalize_step_dir_name(work_dir, idx + 1, &step.id); + } +} + +fn list_step_dirs_for_id(flow_dir: &Path, step_id: &str) -> Vec { + let mut matches = Vec::new(); + let suffix = format!("-{}", step_id); + if let Ok(entries) = fs::read_dir(flow_dir) { + for entry in entries.flatten() { + let path = entry.path(); + if !path.is_dir() { + continue; + } + let Some(name) = path.file_name().and_then(|n| n.to_str()) else { + continue; + }; + if name.ends_with(&suffix) { + matches.push(path); + } + } + } + matches +} + +fn has_step_share_marker(flow_dir: &Path, step_id: &str) -> bool { + list_step_dirs_for_id(flow_dir, step_id) + .into_iter() + .any(|dir| dir.join("syft.pub.yaml").exists()) +} + +/// Get the progress path for coordination +/// Structure: {flow_path}/_progress/ +fn get_progress_path(flow_path: &PathBuf) -> PathBuf { + flow_path.join("_progress") +} + +/// Private local step-log path (not synced/shared with other participants). 
+fn get_private_step_log_path(session_id: &str, step_id: &str) -> Result { + let biovault_home = biovault::config::get_biovault_home() + .map_err(|e| format!("Failed to get BioVault home: {}", e))?; + let dir = biovault_home + .join(".biovault") + .join("multiparty_step_logs") + .join(session_id); + fs::create_dir_all(&dir) + .map_err(|e| format!("Failed to create private step log directory: {}", e))?; + Ok(dir.join(format!("{}.log", step_id))) +} + +fn append_private_step_log(session_id: &str, step_id: &str, message: &str) { + let Ok(path) = get_private_step_log_path(session_id, step_id) else { + return; + }; + let Ok(mut file) = fs::OpenOptions::new().create(true).append(true).open(path) else { + return; + }; + let _ = writeln!(file, "{} {}", Utc::now().to_rfc3339(), message); +} + +fn read_tail_lines(path: &PathBuf, lines: usize) -> Result { + if !path.exists() { + return Ok(String::new()); + } + let file = + fs::File::open(path).map_err(|e| format!("Failed to open {}: {}", path.display(), e))?; + let reader = BufReader::new(file); + let all_lines: Vec = reader.lines().map_while(Result::ok).collect(); + if all_lines.is_empty() { + return Ok(String::new()); + } + let selected: Vec = all_lines + .into_iter() + .rev() + .take(lines) + .collect::>() + .into_iter() + .rev() + .collect(); + Ok(selected.join("\n")) +} + +fn count_files_recursive(root: &Path, suffix: &str) -> usize { + if !root.exists() { + return 0; + } + let mut total = 0usize; + let Ok(entries) = fs::read_dir(root) else { + return 0; + }; + for entry in entries.flatten() { + let path = entry.path(); + if path.is_dir() { + total += count_files_recursive(&path, suffix); + continue; + } + if suffix.is_empty() { + total += 1; + continue; + } + if path + .file_name() + .and_then(|n| n.to_str()) + .map(|n| n.ends_with(suffix)) + .unwrap_or(false) + { + total += 1; + } + } + total +} + +fn tcp_port_is_listening(port: u16) -> bool { + let addr = SocketAddrV4::new(Ipv4Addr::LOCALHOST, port); + 
TcpStream::connect_timeout(&addr.into(), Duration::from_millis(120)).is_ok() +} + +fn select_step_log_lines(log_text: &str, step_id: &str, lines: usize) -> String { + let all_lines: Vec = log_text.lines().map(|s| s.to_string()).collect(); + if all_lines.is_empty() { + return String::new(); + } + let include_noisy_syftbox = env::var("BV_INCLUDE_NOISY_SYFTBOX_LOGS") + .ok() + .map(|v| is_truthy(&v)) + .unwrap_or(false); + + let filtered_source: Vec = if include_noisy_syftbox { + all_lines.clone() + } else { + all_lines + .iter() + .filter_map(|line| { + let lc = line.to_ascii_lowercase(); + if lc.contains("acl staging grace") { + return None; + } + let is_syftbox_noise = lc.contains("syftbox") + && (lc.contains("network") + || lc.contains("websocket") + || lc.contains("http") + || lc.contains("queue") + || lc.contains("heartbeat") + || lc.contains("poll")); + let keep = !is_syftbox_noise + || lc.contains("syqure") + || lc.contains("mpc") + || lc.contains("secure_aggregate") + || lc.contains("tcp proxy") + || lc.contains("step_") + || lc.contains("step "); + if keep { + Some(line.clone()) + } else { + None + } + }) + .collect() + }; + + let window = lines.saturating_mul(4); + let start_index = filtered_source.len().saturating_sub(window); + let tail_window: Vec = filtered_source.into_iter().skip(start_index).collect(); + + let step_lc = step_id.to_ascii_lowercase(); + let needle_a = format!("step {}", step_lc); + let needle_b = format!("step '{}'", step_lc); + let needle_c = format!("step \"{}\"", step_lc); + let needle_d = format!("{}@", step_lc); + let needle_e = format!("\"step\":\"{}\"", step_lc); + + let filtered: Vec = tail_window + .iter() + .filter_map(|line| { + let lc = line.to_ascii_lowercase(); + if lc.contains(&needle_a) + || lc.contains(&needle_b) + || lc.contains(&needle_c) + || lc.contains(&needle_d) + || lc.contains(&needle_e) + { + Some(line.clone()) + } else { + None + } + }) + .collect(); + + let selected: Vec = if filtered.is_empty() { + 
tail_window + .into_iter() + .rev() + .take(lines) + .collect::>() + .into_iter() + .rev() + .collect() + } else { + filtered + .into_iter() + .rev() + .take(lines) + .collect::>() + .into_iter() + .rev() + .collect() + }; + + selected.join("\n") +} + +fn collect_step_readiness_blockers(flow_state: &MultipartyFlowState, step_id: &str) -> Vec { + let Some(step) = flow_state.steps.iter().find(|s| s.id == step_id) else { + return vec![format!("step '{}' not found in session state", step_id)]; + }; + + let mut lines = Vec::new(); + lines.push(format!( + "status={:?} my_action={} is_barrier={} shares_output={}", + step.status, step.my_action, step.is_barrier, step.shares_output + )); + if !step.targets.is_empty() { + lines.push(format!("targets={}", step.targets.join(", "))); + } + if !step.target_emails.is_empty() { + lines.push(format!("target_emails={}", step.target_emails.join(", "))); + } + + for dep_id in &step.depends_on { + let Some(dep_step) = flow_state.steps.iter().find(|s| s.id == *dep_id) else { + lines.push(format!("dependency '{}' missing from session state", dep_id)); + continue; + }; + + let dep_complete = is_dependency_complete(flow_state, dep_id); + lines.push(format!( + "dependency '{}' status={:?} complete={} require_shared={}", + dep_id, dep_step.status, dep_complete, dep_step.shares_output + )); + if dep_complete { + continue; + } + + if dep_step.target_emails.is_empty() { + lines.push(format!( + " - dependency '{}' has no resolved target_emails", + dep_id + )); + continue; + } + + for target_email in &dep_step.target_emails { + if let Some(participant) = flow_state + .participants + .iter() + .find(|p| p.email.eq_ignore_ascii_case(target_email)) + { + let done = check_participant_step_complete( + &flow_state.flow_name, + &flow_state.session_id, + &flow_state.my_email, + &participant.email, + &participant.role, + dep_id, + dep_step.shares_output, + ); + lines.push(format!( + " - participant {} role={} dependency '{}' complete={}", + 
participant.email, participant.role, dep_id, done + )); + } else { + lines.push(format!( + " - participant '{}' for dependency '{}' not found in participant map", + target_email, dep_id + )); + } + } + } + + if step.is_barrier { + if let Some(wait_for) = &step.barrier_wait_for { + lines.push(format!("barrier.wait_for={}", wait_for)); + let require_shared = flow_state + .steps + .iter() + .find(|s| s.id == *wait_for) + .map(|s| s.shares_output) + .unwrap_or(false); + for target_email in &step.target_emails { + if let Some(participant) = flow_state + .participants + .iter() + .find(|p| p.email.eq_ignore_ascii_case(target_email)) + { + let done = check_participant_step_complete( + &flow_state.flow_name, + &flow_state.session_id, + &flow_state.my_email, + &participant.email, + &participant.role, + wait_for, + require_shared, + ); + lines.push(format!( + " - barrier target {} wait_for '{}' complete={} require_shared={}", + participant.email, wait_for, done, require_shared + )); + } + } + } else { + lines.push("barrier step has no wait_for configured".to_string()); + } + } + + lines +} + +fn collect_mpc_tcp_channel_diagnostics(mpc_dir: &Path) -> Vec { + let mut channels = Vec::new(); + let Ok(entries) = fs::read_dir(mpc_dir) else { + return channels; + }; + for entry in entries.flatten() { + let channel_dir = entry.path(); + if !channel_dir.is_dir() { + continue; + } + let Some(channel_name) = channel_dir.file_name().and_then(|n| n.to_str()) else { + continue; + }; + if !channel_name.contains("_to_") { + continue; + } + let marker_path = channel_dir.join("stream.tcp"); + let accept_path = channel_dir.join("stream.accept"); + let marker_exists = marker_path.exists(); + let accept_exists = accept_path.exists(); + let request_count = count_files_recursive(&channel_dir, ".request"); + let response_count = count_files_recursive(&channel_dir, ".response"); + + let mut marker_port = None::; + let mut marker_from = None::; + let mut marker_to = None::; + if marker_exists { + if 
let Ok(raw) = fs::read_to_string(&marker_path) { + if let Ok(json) = serde_json::from_str::(&raw) { + marker_port = json + .get("port") + .and_then(|v| v.as_u64()) + .and_then(|v| u16::try_from(v).ok()); + marker_from = json + .get("from") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()); + marker_to = json + .get("to") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()); + } + } + } + let listener_up = marker_port.map(tcp_port_is_listening); + let status = if listener_up == Some(true) { + "connected" + } else if marker_exists || accept_exists { + "establishing" + } else { + "waiting" + }; + + channels.push(MultipartyMpcChannelDiagnostics { + channel_id: channel_name.to_string(), + from_email: marker_from, + to_email: marker_to, + port: marker_port, + marker: marker_exists, + accept: accept_exists, + listener_up, + requests: request_count, + responses: response_count, + status: status.to_string(), + }); + } + + channels.sort_by(|a, b| a.channel_id.cmp(&b.channel_id)); + channels +} + +fn collect_mpc_tcp_marker_status(mpc_dir: &Path) -> Vec { + collect_mpc_tcp_channel_diagnostics(mpc_dir) + .into_iter() + .map(|channel| { + let port_text = channel + .port + .map(|p| p.to_string()) + .unwrap_or_else(|| "n/a".to_string()); + let listener = match channel.listener_up { + Some(true) => "up", + Some(false) => "down", + None => "unknown", + }; + format!( + "{} marker={} accept={} port={} listener={} requests={} responses={}", + channel.channel_id, + if channel.marker { "yes" } else { "no" }, + if channel.accept { "yes" } else { "no" }, + port_text, + listener, + channel.requests, + channel.responses + ) + }) + .collect() +} + +fn short_hotlink_mode(mode: &str) -> &'static str { + match mode { + "hotlink_quic_only" => "quic-only", + "hotlink_quic_pref" => "quic-pref", + "hotlink_ws_only" => "ws-only", + _ => "unknown", + } +} + +fn read_hotlink_telemetry(path: &Path) -> Option { + let raw = fs::read_to_string(path).ok()?; + let v = serde_json::from_str::(&raw).ok()?; 
+ Some(HotlinkTelemetrySnapshot { + mode: v + .get("mode") + .and_then(|x| x.as_str()) + .unwrap_or("unknown") + .to_string(), + updated_ms: v.get("updated_ms").and_then(|x| x.as_u64()), + tx_packets: v.get("tx_packets").and_then(|x| x.as_u64()).unwrap_or(0), + tx_bytes: v.get("tx_bytes").and_then(|x| x.as_u64()).unwrap_or(0), + tx_quic_packets: v + .get("tx_quic_packets") + .and_then(|x| x.as_u64()) + .unwrap_or(0), + tx_ws_packets: v.get("tx_ws_packets").and_then(|x| x.as_u64()).unwrap_or(0), + tx_avg_send_ms: v + .get("tx_avg_send_ms") + .and_then(|x| x.as_f64()) + .unwrap_or(0.0), + rx_packets: v.get("rx_packets").and_then(|x| x.as_u64()).unwrap_or(0), + rx_bytes: v.get("rx_bytes").and_then(|x| x.as_u64()).unwrap_or(0), + rx_avg_write_ms: v + .get("rx_avg_write_ms") + .and_then(|x| x.as_f64()) + .unwrap_or(0.0), + ws_fallbacks: v.get("ws_fallbacks").and_then(|x| x.as_u64()).unwrap_or(0), + }) +} + +fn hotlink_telemetry_candidates(biovault_home: &Path, email: &str) -> Vec { + let datasites_root = biovault_home.join("datasites"); + vec![ + datasites_root + .join(email) + .join(".syftbox") + .join("hotlink_telemetry.json"), + datasites_root + .join(email) + .join("datasites") + .join(email) + .join(".syftbox") + .join("hotlink_telemetry.json"), + biovault_home + .join(email) + .join(".syftbox") + .join("hotlink_telemetry.json"), + ] +} + +fn resolve_module_directory( + flow_name: &str, + module_path: Option<&str>, + module_ref: Option<&str>, +) -> Option { + let biovault_home = biovault::config::get_biovault_home().ok()?; + let default_flow_root = biovault_home.join("flows").join(flow_name); + let flow_root = biovault::data::BioVaultDb::new() + .ok() + .and_then(|db| db.list_flows().ok()) + .and_then(|flows| { + flows + .into_iter() + .find(|flow| flow.name == flow_name) + .map(|flow| PathBuf::from(flow.flow_path)) + }) + .filter(|p| p.exists()) + .unwrap_or(default_flow_root); + let modules_root = biovault_home.join("modules"); + + let mut candidates: Vec = 
Vec::new(); + + if let Some(path_str) = module_path.map(str::trim).filter(|s| !s.is_empty()) { + let raw = PathBuf::from(path_str); + if raw.is_absolute() { + candidates.push(raw); + } else { + let trimmed = path_str.trim_start_matches("./"); + candidates.push(flow_root.join(trimmed)); + candidates.push(modules_root.join(trimmed.trim_start_matches("modules/"))); + } + } + + if let Some(module_ref) = module_ref.map(str::trim).filter(|s| !s.is_empty()) { + let raw = PathBuf::from(module_ref); + if raw.is_absolute() { + candidates.push(raw); + } else { + let trimmed = module_ref.trim_start_matches("./"); + // Handle path-like refs such as "./modules/gen-variants". + candidates.push(flow_root.join(trimmed)); + candidates.push(modules_root.join(trimmed.trim_start_matches("modules/"))); + // If ref already includes "modules/", avoid duplicating. + if let Some(stripped) = trimmed.strip_prefix("modules/") { + candidates.push(flow_root.join("modules").join(stripped)); + candidates.push(modules_root.join(stripped)); + } else { + candidates.push(flow_root.join("modules").join(trimmed)); + candidates.push(modules_root.join(trimmed)); + } + // Handle short refs such as "gen_variants". + candidates.push(flow_root.join("modules").join(module_ref.replace('_', "-"))); + candidates.push(modules_root.join(module_ref.replace('_', "-"))); + } + } + + candidates.sort(); + candidates.dedup(); + if let Some(found) = candidates.into_iter().find(|candidate| candidate.exists()) { + return Some(found); + } + + // Fallback: support version-suffixed module directories in global modules root, + // e.g. flow references "./modules/gen-variants" while installed module is + // "modules/gen-variants-0.1.1". 
+ let mut lookup_names: Vec = Vec::new(); + if let Some(path_str) = module_path.map(str::trim).filter(|s| !s.is_empty()) { + let base = PathBuf::from(path_str.trim_start_matches("./")); + if let Some(name) = base.file_name().and_then(|n| n.to_str()) { + lookup_names.push(name.to_string()); + } + } + if let Some(module_ref) = module_ref.map(str::trim).filter(|s| !s.is_empty()) { + let base = PathBuf::from(module_ref.trim_start_matches("./")); + if let Some(name) = base.file_name().and_then(|n| n.to_str()) { + lookup_names.push(name.to_string()); + } + lookup_names.push(module_ref.trim_start_matches("./modules/").replace('_', "-")); + } + lookup_names.sort(); + lookup_names.dedup(); + + if let Ok(entries) = fs::read_dir(&modules_root) { + let mut versioned_matches: Vec = Vec::new(); + for entry in entries.flatten() { + let path = entry.path(); + if !path.is_dir() { + continue; + } + let name = entry.file_name().to_string_lossy().to_string(); + for base in &lookup_names { + if name == *base || name.starts_with(&format!("{}-", base)) { + versioned_matches.push(path.clone()); + break; + } + } + } + if !versioned_matches.is_empty() { + versioned_matches.sort(); + return versioned_matches.last().cloned(); + } + } + + None +} + +fn read_syqure_runner_config(module_dir: &Path) -> Result<(String, String, u64), String> { + let module_yaml_path = if module_dir.join("module.yaml").exists() { + module_dir.join("module.yaml") + } else if module_dir.join("module.yml").exists() { + module_dir.join("module.yml") + } else { + return Err(format!( + "Missing module.yaml/module.yml in {}", + module_dir.display() + )); + }; + + let yaml = fs::read_to_string(&module_yaml_path).map_err(|e| { + format!( + "Failed to read module config {}: {}", + module_yaml_path.display(), + e + ) + })?; + let parsed: serde_yaml::Value = + serde_yaml::from_str(&yaml).map_err(|e| format!("Invalid module yaml: {}", e))?; + + let runner = parsed + .get("spec") + .and_then(|v| v.get("runner")) + .cloned() + 
.unwrap_or(serde_yaml::Value::Null); + + let entrypoint = runner + .get("entrypoint") + .and_then(|v| v.as_str()) + .map(|s| s.trim().to_string()) + .filter(|s| !s.is_empty()) + .unwrap_or_else(|| "smpc_aggregate.codon".to_string()); + + let syqure_cfg = runner + .get("syqure") + .cloned() + .unwrap_or(serde_yaml::Value::Null); + let transport = syqure_cfg + .get("transport") + .and_then(|v| v.as_str()) + .map(|s| s.trim().to_string()) + .filter(|s| !s.is_empty()) + .unwrap_or_else(|| "file".to_string()); + let poll_ms = syqure_cfg + .get("poll_ms") + .and_then(|v| v.as_u64()) + .unwrap_or(50); + + Ok((entrypoint, transport, poll_ms)) +} + +fn validate_module_assets_exist(module_dir: &Path) -> Result<(), String> { + let module_yaml_path = if module_dir.join("module.yaml").exists() { + module_dir.join("module.yaml") + } else if module_dir.join("module.yml").exists() { + module_dir.join("module.yml") + } else { + return Ok(()); + }; + + let yaml = fs::read_to_string(&module_yaml_path).map_err(|e| { + format!( + "Failed to read module config {}: {}", + module_yaml_path.display(), + e + ) + })?; + let parsed: serde_yaml::Value = + serde_yaml::from_str(&yaml).map_err(|e| format!("Invalid module yaml: {}", e))?; + + let assets = parsed + .get("spec") + .and_then(|v| v.get("assets")) + .and_then(|v| v.as_sequence()) + .cloned() + .unwrap_or_default(); + if assets.is_empty() { + return Ok(()); + } + + let mut missing: Vec = Vec::new(); + for asset in assets { + let rel_path = asset + .get("path") + .and_then(|p| p.as_str()) + .map(|s| s.trim()) + .filter(|s| !s.is_empty()); + let Some(rel_path) = rel_path else { + continue; + }; + let abs = module_dir.join(rel_path); + if !abs.exists() { + missing.push(rel_path.to_string()); + } + } + + if missing.is_empty() { + Ok(()) + } else { + Err(format!( + "Module assets missing in {}: {}", + module_dir.display(), + missing.join(", ") + )) + } +} + +fn preflight_validate_flow_modules(flow_name: &str, flow_spec: &serde_json::Value) 
-> Result<(), String> { + let spec_root = flow_spec_root(flow_spec); + let steps = spec_root + .get("steps") + .and_then(|s| s.as_array()) + .cloned() + .unwrap_or_default(); + + let mut issues: Vec = Vec::new(); + + for step in steps { + let step_id = step + .get("id") + .and_then(|v| v.as_str()) + .unwrap_or("unknown-step"); + + let module_ref = step.get("uses").and_then(|v| v.as_str()); + if module_ref.is_none() { + continue; + } + + let module_path = module_ref.and_then(|module_id| { + spec_root + .get("modules") + .and_then(|m| m.get(module_id)) + .and_then(|m| m.get("source")) + .and_then(|s| s.get("path")) + .and_then(|p| p.as_str()) + }); + + let Some(module_dir) = + resolve_module_directory(flow_name, module_path, module_ref) + else { + issues.push(format!( + "step '{}' references module '{}' but it could not be resolved", + step_id, + module_ref.unwrap_or("") + )); + continue; + }; + + if let Err(err) = validate_module_assets_exist(&module_dir) { + issues.push(format!("step '{}': {}", step_id, err)); + } + } + + if issues.is_empty() { + Ok(()) + } else { + Err(format!( + "Flow invitation blocked: missing module files/assets.\n{}", + issues + .into_iter() + .map(|i| format!("- {}", i)) + .collect::>() + .join("\n") + )) + } +} + +fn is_truthy(value: &str) -> bool { + matches!( + value.trim().to_ascii_lowercase().as_str(), + "1" | "true" | "yes" | "on" + ) +} + +fn mpc_comm_port_with_base( + base: usize, + local_pid: usize, + remote_pid: usize, + parties: usize, +) -> usize { + let min_pid = std::cmp::min(local_pid, remote_pid); + let max_pid = std::cmp::max(local_pid, remote_pid); + let offset_major = min_pid * parties - min_pid * (min_pid + 1) / 2; + let offset_minor = max_pid - min_pid; + base + offset_major + offset_minor +} + +fn stable_syqure_port_base_for_run(run_id: &str, party_count: usize) -> Result { + run_dynamic::prepare_syqure_port_base_for_run(run_id, party_count, None).map_err(|e| { + format!( + "Failed to allocate Syqure TCP proxy base port 
for run '{}': {}", + run_id, e + ) + }) +} + +fn setup_mpc_channel_permissions( + work_dir: &Path, + owner_email: &str, + party_emails: &[String], + local_party_id: usize, + tcp_proxy_enabled: bool, + syqure_port_base: Option, +) -> Result<(), String> { + let mpc_root = work_dir.join("_mpc"); + fs::create_dir_all(&mpc_root) + .map_err(|e| format!("Failed to create mpc root {}: {}", mpc_root.display(), e))?; + + // Root-level permissions so all participants can discover MPC transport logs/channels. + create_syft_pub_yaml(&mpc_root, owner_email, party_emails)?; + + for (remote_id, remote_email) in party_emails.iter().enumerate() { + if remote_id == local_party_id { + continue; + } + let channel_dir = mpc_root.join(format!("{}_to_{}", local_party_id, remote_id)); + fs::create_dir_all(&channel_dir).map_err(|e| { + format!( + "Failed to create mpc channel {}: {}", + channel_dir.display(), + e + ) + })?; + + // Match CLI flow permissions: sender+receiver can read/write, sender is admin. + let perms_path = channel_dir.join("syft.pub.yaml"); + if !perms_path.exists() { + let channel_doc = serde_json::json!({ + "rules": [ + { + "pattern": "**", + "access": { + "admin": [owner_email], + "read": [owner_email, remote_email], + "write": [owner_email, remote_email], + }, + }, + ], + }); + let yaml = serde_yaml::to_string(&channel_doc) + .map_err(|e| format!("Failed to serialize {}: {}", perms_path.display(), e))?; + fs::write(&perms_path, yaml) + .map_err(|e| format!("Failed to write {}: {}", perms_path.display(), e))?; + } + + if tcp_proxy_enabled { + let global_base = syqure_port_base + .ok_or_else(|| "Missing Syqure port base while tcp proxy is enabled".to_string())?; + let parties = party_emails.len().max(2); + let port = mpc_comm_port_with_base(global_base, local_party_id, remote_id, parties); + let from_base = global_base + local_party_id * SEQURE_COMMUNICATION_PORT_STRIDE; + let to_base = global_base + remote_id * SEQURE_COMMUNICATION_PORT_STRIDE; + let from_port = 
mpc_comm_port_with_base(from_base, local_party_id, remote_id, parties); + let to_port = mpc_comm_port_with_base(to_base, remote_id, local_party_id, parties); + let marker = serde_json::json!({ + "from": owner_email, + "to": remote_email, + "port": port, + "ports": { + owner_email: from_port, + remote_email: to_port, + }, + }); + let marker_path = channel_dir.join("stream.tcp"); + let accept_path = channel_dir.join("stream.accept"); + fs::write(&marker_path, marker.to_string()) + .map_err(|e| format!("Failed to write {}: {}", marker_path.display(), e))?; + fs::write(&accept_path, "1") + .map_err(|e| format!("Failed to write {}: {}", accept_path.display(), e))?; + } + } + + Ok(()) +} + +fn maybe_setup_mpc_channels( + flow_spec: &serde_json::Value, + work_dir: &Path, + my_email: &str, + party_emails: &[String], + session_id: &str, +) -> Result, String> { + let has_mpc = flow_spec_root(flow_spec).get("mpc").is_some(); + if !has_mpc { + return Ok(None); + } + + let party_count = party_emails.len(); + let local_party_id = party_emails + .iter() + .position(|email| email.eq_ignore_ascii_case(my_email)) + .unwrap_or(0); + + // TCP proxy is always on (syqure integrated). Only an explicit + // SEQURE_TCP_PROXY=0 can disable it (e.g. Windows container path). + let tcp_proxy_enabled = env::var("SEQURE_TCP_PROXY") + .ok() + .map(|v| is_truthy(&v)) + .unwrap_or(true); + + let syqure_port_base = if tcp_proxy_enabled { + Some(stable_syqure_port_base_for_run(session_id, party_count)?) 
// Continuation of `maybe_setup_mpc_channels` (its head is on preceding,
// off-view lines): finalize the optional syqure port base, grant channel
// permissions for all parties, and return the base for session persistence.
    } else {
        None
    };

    setup_mpc_channel_permissions(
        work_dir,
        my_email,
        party_emails,
        local_party_id,
        tcp_proxy_enabled,
        syqure_port_base,
    )?;

    Ok(syqure_port_base)
}

/// Returns `true` when any module referenced by the flow spec resolves to a
/// local module directory whose syqure runner config declares the "hotlink"
/// transport (compared case-insensitively). Modules without a `source.path`,
/// unresolvable directories, and unreadable runner configs are skipped.
fn flow_has_hotlink_transport(flow_spec: &serde_json::Value) -> bool {
    let modules = flow_spec_root(flow_spec)
        .get("modules")
        .and_then(|m| m.as_object());
    let Some(modules) = modules else {
        return false;
    };
    for (_name, module_def) in modules {
        // Only modules that declare `source.path` can be located on disk.
        let source_path = module_def
            .get("source")
            .and_then(|s| s.get("path"))
            .and_then(|p| p.as_str());
        if let Some(path) = source_path {
            let module_dir_candidates = resolve_module_directory_from_flow_spec(path);
            for module_dir in module_dir_candidates {
                if let Ok((_, transport, _)) = read_syqure_runner_config(&module_dir) {
                    if transport.eq_ignore_ascii_case("hotlink") {
                        return true;
                    }
                }
            }
        }
    }
    false
}

/// Resolves a flow-spec-relative module `source.path` against every flow
/// directory under `<biovault_home>/flows`, returning all candidates that
/// exist on disk. Failure to read the home or the flows root yields an
/// empty list rather than an error.
fn resolve_module_directory_from_flow_spec(source_path: &str) -> Vec<PathBuf> {
    let mut candidates = Vec::new();
    let biovault_home = match biovault::config::get_biovault_home() {
        Ok(h) => h,
        Err(_) => return candidates,
    };
    let flows_root = biovault_home.join("flows");
    if let Ok(entries) = fs::read_dir(&flows_root) {
        for entry in entries.flatten() {
            let flow_dir = entry.path();
            if flow_dir.is_dir() {
                let trimmed = source_path.trim_start_matches("./");
                let candidate = flow_dir.join(trimmed);
                if candidate.exists() {
                    candidates.push(candidate);
                }
            }
        }
    }
    candidates
}

/// Looks up the declared `path` of a named output in the module's
/// `module.yaml` / `module.yml` (`spec.outputs[].{name,path}`). Returns
/// `"<output_name>.json"` when the output is declared without a path, and
/// `None` when no module file exists or the output is not declared.
fn read_module_output_path(module_dir: &Path, output_name: &str) -> Option<String> {
    let yaml_path = if module_dir.join("module.yaml").exists() {
        module_dir.join("module.yaml")
    } else if module_dir.join("module.yml").exists() {
        module_dir.join("module.yml")
    } else {
        return None;
    };
    let yaml = fs::read_to_string(&yaml_path).ok()?;
    let parsed: serde_yaml::Value = serde_yaml::from_str(&yaml).ok()?;
    let outputs = parsed.get("spec")?.get("outputs")?.as_sequence()?;
    for output in outputs {
        let name = output.get("name")?.as_str()?;
        if name == output_name {
            return output
                .get("path")
                .and_then(|p| p.as_str())
                .map(|s| s.to_string())
                .or_else(|| Some(format!("{}.json", output_name)));
        }
    }
    None
}

/// Maps a step's `share.<share_name>.source` of the form
/// `self.outputs.<name>` back to the underlying output name; `None` when the
/// step, share, or prefix is absent.
fn resolve_share_source_output(
    flow_spec: &serde_json::Value,
    source_step_id: &str,
    share_name: &str,
) -> Option<String> {
    let steps = flow_spec_root(flow_spec).get("steps")?.as_array()?;
    for step in steps {
        let id = step.get("id")?.as_str()?;
        if id != source_step_id {
            continue;
        }
        let share = step.get("share")?.get(share_name)?;
        let source = share.get("source")?.as_str()?;
        if let Some(output_name) = source.strip_prefix("self.outputs.") {
            return Some(output_name.to_string());
        }
    }
    None
}

/// Resolves every `with:` binding of a step into CLI argument pairs
/// (`--<input> <path>`), honoring `only`/`without` group filters for the
/// local participant. Body continues on the following lines.
fn resolve_with_bindings(
    with_bindings: &HashMap<String, serde_json::Value>,
    input_overrides: &HashMap<String, String>,
    flow_spec: &serde_json::Value,
    flow_name: &str,
    session_id: &str,
    my_email: &str,
    biovault_home: &Path,
    step_numbers_by_id: &HashMap<String, usize>,
    all_steps: &[StepState],
    work_dir: &Path,
    participants: &[FlowParticipant],
) -> Result<Vec<String>, String> {
    let mut step_args: Vec<String> = Vec::new();
    let (groups, _) = build_group_map_from_participants(participants, flow_spec);

    for (input_name, binding_value) in with_bindings {
        let (ref_str, only_group, without_group) = parse_binding_value(binding_value);
        let ref_str = match ref_str {
            Some(r) => r,
            None => continue,
        };

        // `only`: apply the binding solely for members of the named group;
        // `without`: skip the binding for members of the named group.
        if let Some(group) = only_group {
            if !is_email_in_group(my_email, &group, &groups) {
                continue;
            }
        }
        if let Some(group) = without_group {
            if is_email_in_group(my_email, &group, &groups) {
                continue;
            }
        }

        // A trailing `.url_list` requests a per-participant manifest file
        // instead of a single local path.
        let is_url_list = ref_str.ends_with(".url_list");
        let base_ref = if is_url_list {
            ref_str.trim_end_matches(".url_list")
        } else {
            &ref_str
        };

        let resolved = resolve_single_binding(
            base_ref,
            is_url_list,
            input_overrides,
            flow_spec,
            flow_name,
            session_id,
            my_email,
            biovault_home,
            step_numbers_by_id,
            all_steps,
            work_dir,
input_name, + )?; + + if let Some(path) = resolved { + step_args.push(format!("--{}", input_name)); + step_args.push(path); + } + } + + Ok(step_args) +} + +fn parse_binding_value( + value: &serde_json::Value, +) -> (Option, Option, Option) { + match value { + serde_json::Value::String(s) => (Some(s.clone()), None, None), + serde_json::Value::Object(obj) => { + let ref_str = obj + .get("value") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()); + let only = obj + .get("only") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()); + let without = obj + .get("without") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()); + (ref_str, only, without) + } + _ => (None, None, None), + } +} + +fn is_email_in_group(email: &str, group_name: &str, groups: &HashMap>) -> bool { + if let Some(members) = groups.get(group_name) { + members.iter().any(|m| m.eq_ignore_ascii_case(email)) + } else { + false + } +} + +fn resolve_single_binding( + base_ref: &str, + is_url_list: bool, + input_overrides: &HashMap, + flow_spec: &serde_json::Value, + flow_name: &str, + session_id: &str, + my_email: &str, + biovault_home: &Path, + step_numbers_by_id: &HashMap, + all_steps: &[StepState], + work_dir: &Path, + input_name: &str, +) -> Result, String> { + fn maybe_convert_genotype_list_to_samplesheet( + input_name: &str, + base_ref: &str, + raw_value: &str, + work_dir: &Path, + ) -> Result, String> { + let normalized_input = input_name.trim().to_ascii_lowercase(); + let normalized_ref = base_ref.trim().to_ascii_lowercase(); + let is_genotype_list_binding = normalized_input == "participants" + || normalized_input.contains("genotype") + || normalized_ref.ends_with(".genotype_files") + || normalized_ref.ends_with(".samplesheet"); + if !is_genotype_list_binding { + return Ok(None); + } + + let files: Vec = raw_value + .split(',') + .map(|part| part.trim()) + .filter(|part| !part.is_empty()) + .map(|part| part.to_string()) + .collect(); + if files.is_empty() { + return Ok(None); + } + if 
!files.iter().all(|path| Path::new(path).is_file()) { + return Ok(None); + } + + let generated_dir = work_dir.join("_inputs").join("generated"); + fs::create_dir_all(&generated_dir).map_err(|e| { + format!( + "Failed to create generated inputs dir {}: {}", + generated_dir.display(), + e + ) + })?; + + let out_path = generated_dir.join(format!("{}_selected.csv", input_name)); + let mut csv = String::from("participant_id,genotype_file\n"); + for (idx, file_path) in files.iter().enumerate() { + let participant_id = Path::new(file_path) + .file_stem() + .and_then(|s| s.to_str()) + .map(|s| s.trim()) + .filter(|s| !s.is_empty()) + .unwrap_or("participant"); + let safe_participant = participant_id.replace(',', "_"); + let escaped_path = file_path.replace('"', "\"\""); + csv.push_str(&format!("{},\"{}\"\n", safe_participant, escaped_path)); + if idx > 10_000 { + break; + } + } + + fs::write(&out_path, csv).map_err(|e| { + format!( + "Failed to write generated samplesheet {}: {}", + out_path.display(), + e + ) + })?; + Ok(Some(out_path.to_string_lossy().to_string())) + } + + if let Some(input_name) = base_ref.strip_prefix("inputs.") { + if let Some(value) = input_overrides + .get(base_ref) + .or_else(|| input_overrides.get(input_name)) + { + if let Some(generated_samplesheet) = + maybe_convert_genotype_list_to_samplesheet(input_name, base_ref, value, work_dir)? 
+ { + return Ok(Some(generated_samplesheet)); + } + return Ok(Some(value.clone())); + } + + if let Some(default_value) = flow_spec + .get("spec") + .and_then(|s| s.get("inputs")) + .and_then(|inputs| inputs.get(input_name)) + .and_then(|input| input.get("default")) + { + return match default_value { + serde_json::Value::Null => Ok(None), + serde_json::Value::String(s) => { + let trimmed = s.trim(); + if trimmed.is_empty() { + Ok(None) + } else { + Ok(Some(trimmed.to_string())) + } + } + serde_json::Value::Array(arr) => { + let joined = arr + .iter() + .filter_map(|v| match v { + serde_json::Value::String(s) => { + let trimmed = s.trim(); + if trimmed.is_empty() { + None + } else { + Some(trimmed.to_string()) + } + } + _ => Some(v.to_string()), + }) + .collect::>() + .join(","); + if joined.trim().is_empty() { + Ok(None) + } else { + Ok(Some(joined)) + } + } + other => Ok(Some(other.to_string())), + }; + } + + return Ok(None); + } + + let parts: Vec<&str> = base_ref.split('.').collect(); + if parts.len() < 4 || parts[0] != "step" { + return Ok(Some(base_ref.to_string())); + } + + let source_step_id = parts[1]; + let ref_type = parts[2]; + let ref_name = parts[3]; + + let source_step_number = step_numbers_by_id.get(source_step_id).copied().unwrap_or(1); + + let source_module_ref = all_steps + .iter() + .find(|s| s.id == source_step_id) + .and_then(|s| s.module_ref.as_deref()); + let source_module_path = all_steps + .iter() + .find(|s| s.id == source_step_id) + .and_then(|s| s.module_path.as_deref()); + + let file_name = match ref_type { + "outputs" => { + let module_dir = + resolve_module_directory(flow_name, source_module_path, source_module_ref); + module_dir + .and_then(|dir| read_module_output_path(&dir, ref_name)) + .unwrap_or_else(|| format!("{}.json", ref_name)) + } + "share" => { + let output_name = resolve_share_source_output(flow_spec, source_step_id, ref_name) + .unwrap_or_else(|| ref_name.to_string()); + let module_dir = + 
resolve_module_directory(flow_name, source_module_path, source_module_ref); + module_dir + .and_then(|dir| read_module_output_path(&dir, &output_name)) + .unwrap_or_else(|| format!("{}.txt", ref_name)) + } + _ => return Ok(Some(base_ref.to_string())), + }; + + let source_target_emails: Vec = all_steps + .iter() + .find(|s| s.id == source_step_id) + .map(|s| s.target_emails.clone()) + .unwrap_or_default(); + + if is_url_list { + let manifest_dir = work_dir + .join(format!("{}-{}", source_step_number, source_step_id)) + .join("_manifests"); + let _ = fs::create_dir_all(&manifest_dir); + let manifest_path = manifest_dir.join(format!("{}.manifest.txt", input_name)); + + let mut manifest_lines = Vec::new(); + let mut unresolved_targets = Vec::new(); + for target_email in &source_target_emails { + if let Some(path) = find_participant_step_file( + &biovault_home.to_path_buf(), + my_email, + target_email, + flow_name, + session_id, + source_step_number, + source_step_id, + &file_name, + ) { + manifest_lines.push(format!("{}\t{}", target_email, path.display())); + } else { + unresolved_targets.push(target_email.clone()); + } + } + + if !unresolved_targets.is_empty() { + return Err(format!( + "Failed to resolve flow binding '{}' (input '{}'): missing '{}' from step '{}' for participants [{}] in run '{}' (viewer: {})", + base_ref, + input_name, + file_name, + source_step_id, + unresolved_targets.join(", "), + session_id, + my_email + )); + } + if manifest_lines.is_empty() { + return Err(format!( + "Failed to resolve flow binding '{}' (input '{}'): no participant outputs were found for step '{}' in run '{}' (viewer: {})", + base_ref, input_name, source_step_id, session_id, my_email + )); + } + fs::write(&manifest_path, manifest_lines.join("\n")) + .map_err(|e| format!("Failed to write manifest: {}", e))?; + Ok(Some(manifest_path.to_string_lossy().to_string())) + } else { + let source_email = if ref_type == "share" { + source_target_emails + .first() + .cloned() + 
.unwrap_or_else(|| my_email.to_string()) + } else { + my_email.to_string() + }; + let path = find_participant_step_file( + &biovault_home.to_path_buf(), + my_email, + &source_email, + flow_name, + session_id, + source_step_number, + source_step_id, + &file_name, + ); + let resolved = path.map(|p| p.to_string_lossy().to_string()); + if resolved.is_none() { + return Err(format!( + "Failed to resolve flow binding '{}' (input '{}'): missing '{}' from step '{}' for participant '{}' in run '{}' (viewer: {})", + base_ref, input_name, file_name, source_step_id, source_email, session_id, my_email + )); + } + Ok(resolved) + } +} + +fn find_participant_step_file( + biovault_home: &PathBuf, + viewer_email: &str, + participant_email: &str, + flow_name: &str, + session_id: &str, + step_number: usize, + step_id: &str, + file_name: &str, +) -> Option { + participant_flow_dirs_for_viewer( + biovault_home, + viewer_email, + participant_email, + flow_name, + session_id, + ) + .into_iter() + .find_map(|base| { + let direct = resolve_step_output_dir_for_base(&base, step_number, step_id) + .map(|dir| dir.join(file_name)) + .filter(|path| path.exists()); + if direct.is_some() { + return direct; + } + // Fallback: tolerate step-number drift by locating any "-" directory. 
+ resolve_step_output_dir_by_id_any_number(&base, step_id) + .map(|dir| dir.join(file_name)) + .filter(|path| path.exists()) + }) +} + +fn resolve_step_output_dir_by_id_any_number(base: &PathBuf, step_id: &str) -> Option { + let entries = fs::read_dir(base).ok()?; + let mut best: Option<(usize, PathBuf)> = None; + for entry in entries.flatten() { + let path = entry.path(); + if !path.is_dir() { + continue; + } + let Some(name) = path.file_name().and_then(|n| n.to_str()) else { + continue; + }; + let Some((num_str, id_part)) = name.split_once('-') else { + continue; + }; + if id_part != step_id { + continue; + } + let Ok(num) = num_str.parse::() else { + continue; + }; + match &best { + Some((best_num, _)) if num <= *best_num => {} + _ => best = Some((num, path)), + } + } + best.map(|(_, p)| p) +} + +fn find_sandbox_root(path: &Path) -> Option { + path.ancestors() + .find(|ancestor| ancestor.file_name().and_then(|n| n.to_str()) == Some("sandbox")) + .map(|ancestor| ancestor.to_path_buf()) +} + +/// Return candidate flow directories for a participant from this viewer's perspective. +/// 1) synced datasite path (what this viewer has received) +/// 2) optional local sandbox sibling path, only for the viewer's own datasite +fn participant_flow_dirs_for_viewer( + biovault_home: &PathBuf, + viewer_email: &str, + participant_email: &str, + flow_name: &str, + session_id: &str, +) -> Vec { + let mut dirs = Vec::new(); + let mut seen = HashSet::new(); + let mut push_dir = |candidate: PathBuf| { + if seen.insert(candidate.clone()) { + dirs.push(candidate); + } + }; + + // Primary: current BioVault home layout. + push_dir( + biovault_home + .join("datasites") + .join(participant_email) + .join("shared") + .join("flows") + .join(flow_name) + .join(session_id), + ); + + // Fallback: derive from sandbox root if BIOVAULT_HOME points deeper than expected. 
+ if let Some(sandbox_root) = find_sandbox_root(biovault_home) { + push_dir( + sandbox_root + .join(viewer_email) + .join("datasites") + .join(participant_email) + .join("shared") + .join("flows") + .join(flow_name) + .join(session_id), + ); + } + + // Legacy local-sandbox sibling fallback for viewer's own datasite. + if viewer_email == participant_email { + if let Some(parent) = biovault_home.parent() { + push_dir( + parent + .join(participant_email) + .join("datasites") + .join(participant_email) + .join("shared") + .join("flows") + .join(flow_name) + .join(session_id), + ); + } + } + + dirs +} + +/// Append a log entry to progress.json (JSONL format for event streaming) +fn append_progress_log(progress_dir: &PathBuf, event: &str, step_id: Option<&str>, role: &str) { + let timestamp = Utc::now().to_rfc3339(); + let log_entry = serde_json::json!({ + "timestamp": timestamp, + "event": event, + "step_id": step_id, + "role": role, + }); + + use std::fs::OpenOptions; + use std::io::Write; + // Legacy location used by existing tests/diagnostics. + let legacy_log_file = progress_dir.join("progress.json"); + if let Ok(mut file) = OpenOptions::new() + .create(true) + .append(true) + .open(&legacy_log_file) + { + let _ = writeln!(file, "{}", log_entry); + } + + // Canonical JSONL log stream. + let log_jsonl_file = progress_dir.join("log.jsonl"); + if let Ok(mut file) = OpenOptions::new() + .create(true) + .append(true) + .open(&log_jsonl_file) + { + let _ = writeln!(file, "{}", log_entry); + } +} + +fn write_progress_state( + progress_dir: &PathBuf, + role: &str, + event: &str, + step_id: Option<&str>, + status: &str, +) { + let state_file = progress_dir.join("state.json"); + // Flow runtime owns `_progress/state.json` with a structured `steps` map. + // Do not overwrite that shape with multiparty event snapshots. 
+ if let Ok(existing) = fs::read_to_string(&state_file) { + if let Ok(existing_json) = serde_json::from_str::(&existing) { + if existing_json.get("steps").is_some() { + return; + } + } + } + + let state = serde_json::json!({ + "updated_at": Utc::now().to_rfc3339(), + "role": role, + "event": event, + "step_id": step_id, + "status": status, + }); + if let Ok(json) = serde_json::to_string_pretty(&state) { + let _ = fs::write(state_file, json); + } +} + +/// Create or update syft.pub.yaml in output directory to enable SyftBox sync. +/// Merges new readers into any existing permission file so that sharing steps +/// can widen access after a step initially creates owner-only permissions. +fn create_syft_pub_yaml( + output_dir: &PathBuf, + owner_email: &str, + read_emails: &[String], +) -> Result<(), String> { + let perm_path = output_dir.join("syft.pub.yaml"); + + let mut all_readers: Vec = read_emails.to_vec(); + + if perm_path.exists() { + if let Ok(contents) = fs::read_to_string(&perm_path) { + if let Ok(existing) = serde_yaml::from_str::(&contents) { + if let Some(rules) = existing.get("rules").and_then(|r| r.as_array()) { + for rule in rules { + if let Some(readers) = rule + .get("access") + .and_then(|a| a.get("read")) + .and_then(|r| r.as_array()) + { + for r in readers { + if let Some(email) = r.as_str() { + let email_s = email.to_string(); + if !all_readers.iter().any(|e| e.eq_ignore_ascii_case(&email_s)) + { + all_readers.push(email_s); + } + } + } + } + } + } + } + } + } + + let doc = serde_json::json!({ + "rules": [ + { + "pattern": "**", + "access": { + "admin": [owner_email], + "read": all_readers, + "write": Vec::::new(), + }, + }, + ], + }); + + let yaml = serde_yaml::to_string(&doc) + .map_err(|e| format!("Failed to serialize syft.pub.yaml: {}", e))?; + + fs::write(&perm_path, yaml).map_err(|e| format!("Failed to write syft.pub.yaml: {}", e))?; + + println!( + "[Multiparty] {} syft.pub.yaml at {:?} with read access for: {:?}", + if perm_path.exists() { 
+ "Updated" + } else { + "Created" + }, + perm_path, + all_readers + ); + + Ok(()) +} + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct SharedStepStatus { + pub step_id: String, + pub role: String, + pub status: String, + pub timestamp: i64, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MultipartyFlowState { + pub session_id: String, + pub flow_name: String, + pub my_role: String, + pub my_email: String, + pub participants: Vec, + pub steps: Vec, + pub status: FlowSessionStatus, + pub thread_id: String, + pub work_dir: Option, + #[serde(default)] + pub run_id: Option, + #[serde(default)] + pub input_overrides: HashMap, + #[serde(default)] + pub flow_spec: Option, + #[serde(default)] + pub syqure_port_base: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum FlowSessionStatus { + Invited, + Accepted, + Running, + Completed, + Failed, + Cancelled, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct StepState { + pub id: String, + pub name: String, + pub description: String, + pub auto_run: bool, + pub status: StepStatus, + pub my_action: bool, + pub shares_output: bool, + pub share_to: Vec, + pub depends_on: Vec, + pub output_dir: Option, + pub outputs_shared: bool, + /// Target groups/emails that execute this step (group names) + pub targets: Vec, + /// Resolved target emails (for UI participant display) + pub target_emails: Vec, + /// Whether this is a barrier step (waits for others) + pub is_barrier: bool, + /// What step this barrier waits for + pub barrier_wait_for: Option, + /// Pretty JSON preview of the flow step config for UI inspection + pub code_preview: Option, + /// Module identifier referenced by `uses` + pub module_ref: Option, + /// Optional module source path (if available in flow spec) + pub module_path: Option, + #[serde(default)] + pub with_bindings: HashMap, + #[serde(default)] + pub input_waiting_on: Vec, + #[serde(default)] + pub input_waiting_reason: Option, 
+} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)] +pub enum StepStatus { + #[default] + Pending, + WaitingForInputs, + Ready, + Running, + Completed, + Sharing, + Shared, + Failed, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct MultipartyStepDiagnostics { + pub session_id: String, + pub step_id: String, + pub flow_name: String, + pub local_email: String, + pub generated_at_ms: u64, + pub channels: Vec, + pub peers: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct MultipartyMpcChannelDiagnostics { + pub channel_id: String, + pub from_email: Option, + pub to_email: Option, + pub port: Option, + pub marker: bool, + pub accept: bool, + pub listener_up: Option, + pub requests: usize, + pub responses: usize, + pub status: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct MultipartyPeerTelemetryDiagnostics { + pub email: String, + pub telemetry_present: bool, + pub mode: String, + pub mode_short: String, + pub status: String, + pub updated_ms: Option, + pub age_ms: Option, + pub tx_packets: u64, + pub tx_bytes: u64, + pub tx_quic_packets: u64, + pub tx_ws_packets: u64, + pub tx_avg_send_ms: f64, + pub rx_packets: u64, + pub rx_bytes: u64, + pub rx_avg_write_ms: f64, + pub ws_fallbacks: u64, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +struct HotlinkTelemetrySnapshot { + mode: String, + updated_ms: Option, + tx_packets: u64, + tx_bytes: u64, + tx_quic_packets: u64, + tx_ws_packets: u64, + tx_avg_send_ms: f64, + rx_packets: u64, + rx_bytes: u64, + rx_avg_write_ms: f64, + ws_fallbacks: u64, +} + +static FLOW_SESSIONS: Lazy>> = + Lazy::new(|| Mutex::new(HashMap::new())); + +/// Remove a multiparty session from in-memory cache so invitations can be re-accepted. +/// Called when a flow run is deleted to allow "Join Flow" again from messages. 
pub fn clear_multiparty_session(session_id: &str) {
    if let Ok(mut sessions) = FLOW_SESSIONS.lock() {
        sessions.remove(session_id);
    }
}

/// Update dependent steps: if all their dependencies are now completed/shared, mark them Ready
fn update_dependent_steps(flow_state: &mut MultipartyFlowState, completed_step_id: &str) {
    // Two passes: collect ids first, then mutate, to avoid holding an
    // immutable borrow of `flow_state.steps` while writing to it.
    let mut steps_to_ready: HashSet<String> = HashSet::new();

    for step in &flow_state.steps {
        // Only update steps that are Pending and have this step as a dependency
        if step.status != StepStatus::Pending {
            continue;
        }
        if !step.depends_on.contains(&completed_step_id.to_string()) {
            continue;
        }
        if !step.my_action {
            continue;
        }

        // Check if all dependencies are now satisfied
        let all_deps_met = step
            .depends_on
            .iter()
            .all(|dep_id| is_dependency_complete(flow_state, dep_id));

        if all_deps_met {
            steps_to_ready.insert(step.id.clone());
        }
    }

    for step in &mut flow_state.steps {
        if steps_to_ready.contains(&step.id) {
            step.status = StepStatus::Ready;
        }
    }
}

/// Refresh local actionable step statuses from current dependency state.
/// This is needed for collaborative sessions where dependencies may complete on
/// remote participants between UI polls.
fn refresh_step_statuses(flow_state: &mut MultipartyFlowState) {
    // Step numbers are 1-based positional indices into the step list.
    let step_numbers_by_id = flow_state
        .steps
        .iter()
        .enumerate()
        .map(|(i, s)| (s.id.clone(), i + 1))
        .collect::<HashMap<_, _>>();
    let all_steps_snapshot = flow_state.steps.clone();

    // (step_id, new_status, waiting_on_emails, waiting_reason) — applied in
    // a second pass to avoid aliasing the steps vector.
    let mut updates: Vec<(String, StepStatus, Vec<String>, Option<String>)> = Vec::new();

    for step in &flow_state.steps {
        if !step.my_action {
            continue;
        }
        if step.is_barrier {
            // Barrier progression is handled by update_barrier_steps based on
            // cross-participant completion, not generic dependency refresh.
            continue;
        }
        // Only pre-run states are refreshed; Running/terminal states are left alone.
        if step.status != StepStatus::Pending
            && step.status != StepStatus::WaitingForInputs
            && step.status != StepStatus::Ready
        {
            continue;
        }

        let all_deps_met = step
            .depends_on
            .iter()
            .all(|dep_id| is_dependency_complete(flow_state, dep_id));
        if !all_deps_met {
            // Dependencies regressed or are not yet met: fall back to Pending.
            updates.push((step.id.clone(), StepStatus::Pending, Vec::new(), None));
            continue;
        }

        let (status, waiting_on, reason) = check_step_input_readiness(
            flow_state,
            step,
            &step_numbers_by_id,
            &all_steps_snapshot,
        );
        updates.push((step.id.clone(), status, waiting_on, reason));
    }

    for (step_id, status, waiting_on, reason) in updates {
        if let Some(step) = flow_state.steps.iter_mut().find(|s| s.id == step_id) {
            step.status = status;
            step.input_waiting_on = waiting_on;
            step.input_waiting_reason = reason;
        }
    }
}

/// Best-effort extraction of participant emails from a binding-resolution
/// error message, matching the "participants [a, b]" and "participant 'a'"
/// phrasings produced by resolve_single_binding. Result is sorted/deduped.
fn extract_waiting_emails_from_binding_error(err: &str) -> Vec<String> {
    let mut emails = Vec::new();
    if let Some(start_idx) = err.find("participants [") {
        let start = start_idx + "participants [".len();
        if let Some(end_rel) = err[start..].find(']') {
            let inner = &err[start..start + end_rel];
            for part in inner.split(',') {
                let email = part.trim();
                if !email.is_empty() {
                    emails.push(email.to_string());
                }
            }
        }
    } else if let Some(start_idx) = err.find("participant '") {
        let start = start_idx + "participant '".len();
        if let Some(end_rel) = err[start..].find('\'') {
            let email = err[start..start + end_rel].trim();
            if !email.is_empty() {
                emails.push(email.to_string());
            }
        }
    }
    emails.sort();
    emails.dedup();
    emails
}

/// Dry-runs binding resolution for a step to decide whether it is Ready or
/// still WaitingForInputs; returns (status, waiting-on emails, reason).
fn check_step_input_readiness(
    flow_state: &MultipartyFlowState,
    step: &StepState,
    step_numbers_by_id: &HashMap<String, usize>,
    all_steps_snapshot: &[StepState],
) -> (StepStatus, Vec<String>, Option<String>) {
    // No bindings means nothing to wait for.
    if step.with_bindings.is_empty() {
        return (StepStatus::Ready, Vec::new(), None);
    }
    let Some(flow_spec_ref) = flow_state.flow_spec.as_ref() else {
        return (
            StepStatus::WaitingForInputs,
            Vec::new(),
            Some("Flow spec not available for input readiness check".to_string()),
        );
    };
    let Some(work_dir_ref) = flow_state.work_dir.as_ref() else {
        return (
            StepStatus::WaitingForInputs,
            Vec::new(),
            Some("Work directory not available for input readiness check".to_string()),
        );
    };
    let biovault_home = match biovault::config::get_biovault_home() {
        Ok(path) => path,
        Err(err) => {
            return (
                StepStatus::WaitingForInputs,
                Vec::new(),
                Some(format!("Unable to read BioVault home: {}", err)),
            )
        }
    };

    match resolve_with_bindings(
        &step.with_bindings,
        &flow_state.input_overrides,
        flow_spec_ref,
        &flow_state.flow_name,
        &flow_state.session_id,
        &flow_state.my_email,
        &biovault_home,
        step_numbers_by_id,
        all_steps_snapshot,
        work_dir_ref,
        &flow_state.participants,
    ) {
        Ok(_) => (StepStatus::Ready, Vec::new(), None),
        Err(err) => {
            if err.contains("Failed to resolve flow binding") {
                let waiting_on = extract_waiting_emails_from_binding_error(&err);
                (StepStatus::WaitingForInputs, waiting_on, Some(err))
            } else {
                // Non-binding errors should surface at run time instead of permanently
                // blocking readiness state.
                (StepStatus::Ready, Vec::new(), None)
            }
        }
    }
}

/// Update barrier steps when their wait_for condition is satisfied
fn update_barrier_steps(flow_state: &mut MultipartyFlowState) {
    // Cloned up front so the closures below don't alias `flow_state`.
    let flow_name = flow_state.flow_name.clone();
    let session_id = flow_state.session_id.clone();
    let participants = flow_state.participants.clone();

    // First pass: check barrier steps
    let mut barriers_to_complete: Vec<String> = Vec::new();

    for step in &flow_state.steps {
        if !step.is_barrier {
            continue;
        }
        if step.status != StepStatus::WaitingForInputs && step.status != StepStatus::Ready {
            continue;
        }

        // Check if the barrier's wait_for step is complete by all targets
        if let Some(ref wait_for_step_id) = step.barrier_wait_for {
            // If the waited-for step shares output, require Shared (not just
            // Completed) before releasing the barrier.
            let require_shared = flow_state
                .steps
                .iter()
                .find(|s| s.id == *wait_for_step_id)
                .map(|s| s.shares_output)
                .unwrap_or(false);
            // Get the target emails for this barrier from target_emails
            let barrier_targets: Vec<String> = step.target_emails.clone();

            // Check if all barrier targets have completed the waited-for step
            let all_complete = barrier_targets.iter().all(|target_email| {
                // Find the participant for this target
                if let Some(participant) = participants.iter().find(|p| &p.email == target_email) {
                    // Check progress file for this participant's waited-for step
                    check_participant_step_complete(
                        &flow_name,
                        &session_id,
                        &flow_state.my_email,
                        &participant.email,
                        &participant.role,
                        wait_for_step_id,
                        require_shared,
                    )
                } else {
                    false
                }
            });

            if all_complete {
                barriers_to_complete.push(step.id.clone());
            }
        }
    }

    // Second pass: mark completed barriers
    for step in &mut flow_state.steps {
        if barriers_to_complete.contains(&step.id) {
            step.status = StepStatus::Completed;
        }
    }

    // Third pass: update steps that depend on completed barriers/dependencies.
    // (continuation of update_barrier_steps' third pass)
    let mut steps_to_ready: HashSet<String> = HashSet::new();
    for step in &flow_state.steps {
        if step.status == StepStatus::Pending && step.my_action {
            let deps_complete = step
                .depends_on
                .iter()
                .all(|dep| is_dependency_complete(flow_state, dep));
            if deps_complete {
                steps_to_ready.insert(step.id.clone());
            }
        }
    }
    for step in &mut flow_state.steps {
        if steps_to_ready.contains(&step.id) {
            step.status = StepStatus::Ready;
        }
    }
}

/// Check if a specific participant has completed a specific step
///
/// Consults, in order: per-step `_progress/<role>_<step>.json` files,
/// the aggregate `_progress/state.json` steps map, a `syft.pub.yaml` share
/// marker, and finally a narrow legacy-demo relaxation. `require_shared`
/// demands "Shared" status rather than merely "Completed".
fn check_participant_step_complete(
    flow_name: &str,
    session_id: &str,
    viewer_email: &str,
    participant_email: &str,
    participant_role: &str,
    step_id: &str,
    require_shared: bool,
) -> bool {
    let biovault_home = match biovault::config::get_biovault_home() {
        Ok(h) => h,
        Err(_) => return false,
    };

    let flow_dirs = participant_flow_dirs_for_viewer(
        &biovault_home,
        viewer_email,
        participant_email,
        flow_name,
        session_id,
    );

    // Tracks whether any source reported Completed while Shared was required,
    // feeding the legacy-demo relaxation at the bottom.
    let mut saw_completed_without_share = false;

    for base in &flow_dirs {
        let progress_file = base
            .join("_progress")
            .join(format!("{}_{}.json", participant_role, step_id));
        if !progress_file.exists() {
            continue;
        }

        // Read and check the status
        if let Ok(content) = fs::read_to_string(&progress_file) {
            if let Ok(status) = serde_json::from_str::<SharedStepStatus>(&content) {
                let normalized = normalize_progress_status(&status.status);
                if require_shared {
                    if normalized == "Shared" {
                        return true;
                    }
                    if normalized == "Completed" {
                        saw_completed_without_share = true;
                    }
                    continue;
                }
                if normalized == "Shared" || normalized == "Completed" {
                    return true;
                }
            }
        }
    }

    // Fallback for flows that only emit _progress/state.json.
    for base in &flow_dirs {
        let state_file = base.join("_progress").join("state.json");
        if !state_file.exists() {
            continue;
        }
        let Ok(content) = fs::read_to_string(&state_file) else {
            continue;
        };
        let Ok(state_json) = serde_json::from_str::<serde_json::Value>(&content) else {
            continue;
        };
        let Some(step_state) = state_json
            .get("steps")
            .and_then(|v| v.as_object())
            .and_then(|obj| obj.get(step_id))
        else {
            continue;
        };

        let normalized = normalize_progress_status(
            step_state
                .get("status")
                .and_then(|v| v.as_str())
                .unwrap_or("pending"),
        );
        if require_shared {
            if normalized == "Shared" {
                return true;
            }
            if normalized == "Completed" {
                saw_completed_without_share = true;
            }
        } else if normalized == "Shared" || normalized == "Completed" {
            return true;
        }
    }

    // Treat syft.pub.yaml as definitive shared evidence when status files lag.
    if require_shared
        && flow_dirs
            .iter()
            .any(|base| has_step_share_marker(base, step_id))
    {
        return true;
    }

    if require_shared && saw_completed_without_share {
        // Only relax shared-vs-completed for the legacy demo "multiparty" flow's
        // generate step. Broadly treating Completed as Shared can let downstream
        // steps (e.g. report_aggregate in Syqure flows) start before shared
        // artifacts are durable/visible, which causes intermittent hangs.
        let legacy_generate_barrier =
            flow_name.eq_ignore_ascii_case("multiparty") && step_id == "generate";
        if legacy_generate_barrier {
            return true;
        }
    }

    false
}

/// Returns true when a dependency step can be treated as complete for this session.
/// This handles both local and cross-participant dependencies.
fn is_dependency_complete(flow_state: &MultipartyFlowState, dep_step_id: &str) -> bool {
    let Some(dep_step) = flow_state.steps.iter().find(|s| s.id == dep_step_id) else {
        // Unknown dependency should not block execution.
        return true;
    };

    // Local view is authoritative when it already shows a terminal state.
    if matches!(dep_step.status, StepStatus::Completed | StepStatus::Shared) {
        return true;
    }

    if dep_step.target_emails.is_empty() {
        return false;
    }

    // For shared-output dependencies (e.g., step.share...), require Shared.
    // Otherwise Completed or Shared is sufficient.
    let require_shared = dep_step.shares_output;

    dep_step.target_emails.iter().all(|target_email| {
        if let Some(participant) = flow_state
            .participants
            .iter()
            .find(|p| &p.email == target_email)
        {
            check_participant_step_complete(
                &flow_state.flow_name,
                &flow_state.session_id,
                &flow_state.my_email,
                &participant.email,
                &participant.role,
                dep_step_id,
                require_shared,
            )
        } else {
            false
        }
    })
}

/// A sharing step is terminal only once Shared; others terminate on
/// Completed or Shared.
fn is_step_terminal_for_success(step: &StepState) -> bool {
    if step.shares_output {
        step.status == StepStatus::Shared
    } else {
        matches!(step.status, StepStatus::Completed | StepStatus::Shared)
    }
}

/// Decides whether the whole run reached a terminal state, mutating
/// `flow_state.status` accordingly. Returns `Some((db_status, run_id))` when
/// the persisted flow-run row should be updated, `None` otherwise.
fn collect_terminal_run_update(flow_state: &mut MultipartyFlowState) -> Option<(String, i64)> {
    // No persisted run, nothing to report.
    let run_id = flow_state.run_id?;

    // Any failed step fails the run (once).
    if flow_state
        .steps
        .iter()
        .any(|s| s.status == StepStatus::Failed)
    {
        if flow_state.status != FlowSessionStatus::Failed {
            flow_state.status = FlowSessionStatus::Failed;
            return Some(("failed".to_string(), run_id));
        }
        return None;
    }

    for step in &flow_state.steps {
        if is_step_terminal_for_success(step) {
            continue;
        }

        // Barriers have no cross-participant completion files of their own.
        if step.is_barrier {
            return None;
        }

        if step.target_emails.is_empty() {
            // Untargeted steps only block when they are ours to run.
            if step.my_action {
                return None;
            }
            continue;
        }

        let require_shared = step.shares_output;
        let all_targets_done = step.target_emails.iter().all(|target_email| {
            flow_state
                .participants
                .iter()
                .find(|p| p.email.eq_ignore_ascii_case(target_email))
                .map(|participant| {
                    check_participant_step_complete(
                        &flow_state.flow_name,
                        &flow_state.session_id,
                        &flow_state.my_email,
                        &participant.email,
                        &participant.role,
                        &step.id,
                        require_shared,
                    )
                })
                .unwrap_or(false)
        });
        if !all_targets_done {
            return None;
        }
    }

    // Already terminal: don't emit a duplicate update.
    if matches!(
        flow_state.status,
        FlowSessionStatus::Completed | FlowSessionStatus::Failed
    ) {
        return None;
    }

    flow_state.status = FlowSessionStatus::Completed;
    Some(("success".to_string(), run_id))
}

/// Applies a pending terminal update (from collect_terminal_run_update) to
/// the persisted flow-run row; best-effort, errors ignored.
fn apply_terminal_run_update(app_state: &AppState, terminal_update: Option<(String, i64)>) {
    let Some((status, run_id)) = terminal_update else {
        return;
    };
    if let Ok(biovault_db) = app_state.biovault_db.lock() {
        let _ = biovault_db.update_flow_run_status(run_id, &status, true);
    }
}

/// Tauri command: create a new multiparty session as the proposer, set up
/// its shared work/progress directories, permissions, optional MPC channels,
/// and cache the initial state. Returns the new session id.
#[tauri::command]
pub async fn send_flow_invitation(
    _state: tauri::State<'_, AppState>,
    thread_id: String,
    flow_name: String,
    flow_spec: serde_json::Value,
    participant_roles: Vec<FlowParticipant>,
) -> Result<String, String> {
    let session_id = uuid::Uuid::new_v4().to_string();

    let config =
        biovault::config::Config::load().map_err(|e| format!("Failed to load config: {}", e))?;
    let my_email = config.email.clone();

    let my_role = participant_roles
        .iter()
        .find(|p| p.email == my_email)
        .map(|p| p.role.clone())
        .unwrap_or_else(|| "organizer".to_string());

    preflight_validate_flow_modules(&flow_name, &flow_spec)?;

    let steps = parse_flow_steps(&flow_spec, &my_email, &participant_roles)?;

    // Set up work_dir for the proposer too (same as accept_flow_invitation)
    let work_dir = get_shared_flow_path(&flow_name, &session_id)?;
    fs::create_dir_all(&work_dir).map_err(|e| format!("Failed to create work dir: {}", e))?;

    // Create progress directory
    let progress_dir = get_progress_path(&work_dir);
    let _ = fs::create_dir_all(&progress_dir);

    // Log "joined" event for the proposer
    append_progress_log(&progress_dir, "joined", None, &my_role);

    // Only coordination/progress data is globally shared.
    let all_participant_emails: Vec<String> =
        participant_roles.iter().map(|p| p.email.clone()).collect();
    if let Err(err) = ensure_flow_subscriptions(&flow_name, &session_id, &all_participant_emails) {
        eprintln!(
            "[Multiparty] Warning: failed to add flow subscriptions: {}",
            err
        );
    }
    let _ = create_syft_pub_yaml(&progress_dir, &my_email, &all_participant_emails);
    write_progress_state(&progress_dir, &my_role, "joined", None, "Accepted");

    if let Ok(Some(flow_source_dir)) = publish_flow_source_for_session(
        &flow_name,
        &work_dir,
        &my_email,
        &all_participant_emails,
        &flow_spec,
    ) {
        crate::desktop_log!(
            "📦 Published flow source for invitation: {}",
            flow_source_dir.display()
        );
    }

    let syqure_port_base = maybe_setup_mpc_channels(
        &flow_spec,
        &work_dir,
        &my_email,
        &all_participant_emails,
        &session_id,
    )?;

    let flow_state = MultipartyFlowState {
        session_id: session_id.clone(),
        flow_name: flow_name.clone(),
        my_role,
        my_email: my_email.clone(),
        participants: participant_roles.clone(),
        steps,
        status: FlowSessionStatus::Accepted,
        thread_id: thread_id.clone(),
        work_dir: Some(work_dir),
        run_id: None,
        input_overrides: HashMap::new(),
        flow_spec: Some(flow_spec.clone()),
        syqure_port_base,
    };
    let _ = persist_multiparty_state(&flow_state);

    {
        let mut sessions = FLOW_SESSIONS.lock().map_err(|e| e.to_string())?;
        sessions.insert(session_id.clone(), flow_state);
    }

    // NOTE(review): this invitation message is constructed but never sent or
    // stored from here — confirm the actual send happens elsewhere (e.g. in
    // the caller) or whether a send call was dropped.
    let _message_type = MessageType::FlowInvitation {
        flow_name,
        session_id: session_id.clone(),
        participants: participant_roles,
        flow_spec,
    };

    Ok(session_id)
}

/// Tauri command: join an existing session as an invited participant.
/// Head of the function; body continues past this chunk.
#[tauri::command]
pub async fn accept_flow_invitation(
    state: tauri::State<'_, AppState>,
    session_id: String,
    flow_name: String,
    flow_spec: serde_json::Value,
    participants: Vec<FlowParticipant>,
    auto_run_all: bool,
    thread_id: Option<String>,
    input_overrides: Option<HashMap<String, String>>,
) -> Result<String, String> {
    // Check if already accepted with a persisted run.
+ // Sessions created by invitation sender may exist in memory without run_id; + // those must still execute the full accept path so the run card exists. + { + let sessions = FLOW_SESSIONS.lock().map_err(|e| e.to_string())?; + if let Some(existing) = sessions.get(&session_id) { + if existing.run_id.is_some() { + return Ok(existing.clone()); + } + } + } + + let config = + biovault::config::Config::load().map_err(|e| format!("Failed to load config: {}", e))?; + let my_email = config.email.clone(); + + let my_role = participants + .iter() + .find(|p| p.email == my_email) + .map(|p| p.role.clone()) + .ok_or_else(|| "You are not a participant in this flow".to_string())?; + + let mut steps = parse_flow_steps(&flow_spec, &my_email, &participants)?; + + if auto_run_all { + for step in &mut steps { + step.auto_run = true; + } + } + + // Create work directory in shared datasite path for cross-client syncing + // Structure: {datasite}/shared/flows/{flow_name}/{session_id}/ + let work_dir = get_shared_flow_path(&flow_name, &session_id)?; + + fs::create_dir_all(&work_dir).map_err(|e| format!("Failed to create work dir: {}", e))?; + + // Create progress directory for coordination + let progress_dir = get_progress_path(&work_dir); + fs::create_dir_all(&progress_dir) + .map_err(|e| format!("Failed to create progress dir: {}", e))?; + + // Only coordination/progress data is globally shared. + let all_participant_emails: Vec = + participants.iter().map(|p| p.email.clone()).collect(); + if let Err(err) = ensure_flow_subscriptions(&flow_name, &session_id, &all_participant_emails) { + eprintln!( + "[Multiparty] Warning: failed to add flow subscriptions: {}", + err + ); + } + let _ = create_syft_pub_yaml(&progress_dir, &my_email, &all_participant_emails); + + // Look up flow_id from database. + // If missing, import from invitation spec so Join works even when UI state is stale. 
+ let mut flow_id = { + let biovault_db = state.biovault_db.lock().map_err(|e| e.to_string())?; + let flows = biovault_db.list_flows().map_err(|e| e.to_string())?; + flows.iter().find(|f| f.name == flow_name).map(|f| f.id) + }; + + if flow_id.is_none() { + let imported = super::flows::import_flow_from_json( + state.clone(), + super::flows::ImportFlowFromJsonRequest { + name: flow_name.clone(), + flow_json: flow_spec.clone(), + overwrite: false, + }, + ) + .await + .map_err(|e| format!("Failed to import flow for invitation acceptance: {}", e))?; + flow_id = Some(imported.id); + } + + let input_overrides = input_overrides.unwrap_or_default(); + + // Create run entry in database + let run_id = if let Some(fid) = flow_id { + let biovault_db = state.biovault_db.lock().map_err(|e| e.to_string())?; + let metadata = serde_json::json!({ + "type": "multiparty", + "session_id": session_id, + "my_role": my_role, + "participants": participants, + "input_overrides": input_overrides.clone(), + }); + let run_id = biovault_db + .create_flow_run_with_metadata( + fid, + &work_dir.to_string_lossy(), + Some(&work_dir.to_string_lossy()), + Some(&metadata.to_string()), + ) + .map_err(|e| format!("Failed to create run entry: {}", e))?; + Some(run_id) + } else { + return Err(format!("Flow '{}' is not available locally", flow_name)); + }; + + let syqure_port_base = maybe_setup_mpc_channels( + &flow_spec, + &work_dir, + &my_email, + &all_participant_emails, + &session_id, + )?; + + let flow_state = MultipartyFlowState { + session_id: session_id.clone(), + flow_name: flow_name.clone(), + my_role, + my_email, + participants, + steps, + status: FlowSessionStatus::Accepted, + thread_id: thread_id.unwrap_or_default(), + work_dir: Some(work_dir.clone()), + run_id, + input_overrides, + flow_spec: Some(flow_spec.clone()), + syqure_port_base, + }; + + // Save state to file for persistence + persist_multiparty_state(&flow_state)?; + + // Log "joined" event to progress.json + let progress_dir = 
get_progress_path(&work_dir); + append_progress_log(&progress_dir, "joined", None, &flow_state.my_role); + write_progress_state( + &progress_dir, + &flow_state.my_role, + "joined", + None, + "Accepted", + ); + + { + let mut sessions = FLOW_SESSIONS.lock().map_err(|e| e.to_string())?; + sessions.insert(session_id, flow_state.clone()); + } + + Ok(flow_state) +} + +#[tauri::command] +pub async fn get_multiparty_flow_state( + state: tauri::State<'_, AppState>, + session_id: String, +) -> Result, String> { + // Recover from restart: restore session snapshot from disk when memory map is empty. + let should_restore = { + let sessions = FLOW_SESSIONS.lock().map_err(|e| e.to_string())?; + !sessions.contains_key(&session_id) + }; + if should_restore { + if let Some(restored) = load_multiparty_state_from_disk(&session_id)? { + let mut sessions = FLOW_SESSIONS.lock().map_err(|e| e.to_string())?; + sessions.insert(session_id.clone(), restored); + } + } + + let (snapshot, terminal_update) = { + let mut sessions = FLOW_SESSIONS.lock().map_err(|e| e.to_string())?; + if let Some(flow_state) = sessions.get_mut(&session_id) { + reconcile_local_step_dirs(flow_state); + // Pull dependency-driven readiness from synced participant progress. 
+ refresh_step_statuses(flow_state); + // Check if any WaitingForInputs steps can now proceed + update_barrier_steps(flow_state); + let terminal_update = collect_terminal_run_update(flow_state); + let _ = persist_multiparty_state(flow_state); + (Some(flow_state.clone()), terminal_update) + } else { + (None, None) + } + }; + + apply_terminal_run_update(state.inner(), terminal_update); + Ok(snapshot) +} + +/// Get progress status for all participants by reading their shared progress files +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ParticipantProgress { + pub email: String, + pub role: String, + pub steps: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ParticipantStepStatus { + pub step_id: String, + pub status: String, + pub timestamp: i64, + pub output_dir: Option, +} + +fn normalize_progress_status(raw: &str) -> String { + match raw.trim().to_ascii_lowercase().as_str() { + "shared" => "Shared".to_string(), + "sharing" => "Sharing".to_string(), + "completed" | "complete" | "success" | "succeeded" | "done" => "Completed".to_string(), + "running" | "in_progress" | "in-progress" => "Running".to_string(), + "ready" => "Ready".to_string(), + "waitingforinputs" | "waiting_for_inputs" | "waiting-for-inputs" => { + "WaitingForInputs".to_string() + } + "failed" | "error" => "Failed".to_string(), + _ => "Pending".to_string(), + } +} + +fn parse_progress_timestamp(value: Option<&serde_json::Value>) -> Option { + let value = value?; + if let Some(ts) = value.as_i64() { + return Some(ts); + } + if let Some(ts) = value.as_u64() { + return i64::try_from(ts).ok(); + } + let text = value.as_str()?; + chrono::DateTime::parse_from_rfc3339(text) + .ok() + .map(|dt| dt.timestamp()) +} + +fn resolve_step_output_dir_for_base( + base: &PathBuf, + step_number: usize, + step_id: &str, +) -> Option { + let canonical = canonicalize_step_dir_name(base, step_number, step_id); + if canonical.exists() { + return Some(canonical); + } + let padded = 
get_padded_step_path(base, step_number, step_id); + if padded.exists() { + return Some(padded); + } + None +} + +fn progress_status_rank(status: &str) -> i32 { + match status { + "Failed" => 100, + "Shared" => 90, + "Completed" => 80, + "Sharing" => 70, + "Running" => 60, + "Ready" => 50, + "WaitingForInputs" => 40, + _ => 10, // Pending / unknown + } +} + +fn should_replace_step_status( + existing: Option<&ParticipantStepStatus>, + candidate: &ParticipantStepStatus, +) -> bool { + let Some(existing) = existing else { + return true; + }; + + let existing_rank = progress_status_rank(&existing.status); + let candidate_rank = progress_status_rank(&candidate.status); + + if candidate_rank != existing_rank { + return candidate_rank > existing_rank; + } + + if candidate.timestamp != existing.timestamp { + return candidate.timestamp > existing.timestamp; + } + + // Prefer records that include a usable output path. + candidate.output_dir.is_some() && existing.output_dir.is_none() +} + +#[tauri::command] +pub async fn get_all_participant_progress( + session_id: String, +) -> Result, String> { + let (flow_name, my_email, participants, step_meta) = { + let sessions = FLOW_SESSIONS.lock().map_err(|e| e.to_string())?; + let flow_state = sessions + .get(&session_id) + .ok_or_else(|| "Flow session not found".to_string())?; + ( + flow_state.flow_name.clone(), + flow_state.my_email.clone(), + flow_state.participants.clone(), + flow_state + .steps + .iter() + .map(|s| (s.id.clone(), s.shares_output)) + .collect::>(), + ) + }; + + let biovault_home = biovault::config::get_biovault_home() + .map_err(|e| format!("Failed to get BioVault home: {}", e))?; + + let mut all_progress = Vec::new(); + + for participant in &participants { + let flow_dirs = participant_flow_dirs_for_viewer( + &biovault_home, + &my_email, + &participant.email, + &flow_name, + &session_id, + ); + let mut steps_by_id: HashMap = HashMap::new(); + + for (step_idx, (step_id, step_shares_output)) in 
step_meta.iter().enumerate() { + let step_number = step_idx + 1; + for base in &flow_dirs { + let progress_file = base + .join("_progress") + .join(format!("{}_{}.json", participant.role, step_id)); + if !progress_file.exists() { + continue; + } + if let Ok(content) = fs::read_to_string(&progress_file) { + if let Ok(status) = serde_json::from_str::(&content) { + let status_normalized = normalize_progress_status(&status.status); + let output_dir_candidate = + resolve_step_output_dir_for_base(base, step_number, step_id); + + let expose_outputs = if *step_shares_output { + status_normalized == "Shared" + } else { + status_normalized == "Completed" || status_normalized == "Shared" + }; + let output_dir = if expose_outputs { + output_dir_candidate.map(|p| p.to_string_lossy().to_string()) + } else { + None + }; + + let candidate = ParticipantStepStatus { + step_id: step_id.clone(), + status: status_normalized, + timestamp: status.timestamp, + output_dir, + }; + + if should_replace_step_status(steps_by_id.get(step_id), &candidate) { + steps_by_id.insert(step_id.clone(), candidate); + } + } + } + } + } + + // Fallback for flows that publish progress in _progress/state.json (e.g. Syqure flow runs). 
+ for base in &flow_dirs { + let state_file = base.join("_progress").join("state.json"); + if !state_file.exists() { + continue; + } + if let Ok(content) = fs::read_to_string(&state_file) { + if let Ok(state_json) = serde_json::from_str::(&content) { + if let Some(step_obj) = state_json.get("steps").and_then(|v| v.as_object()) { + for (step_idx, (step_id, step_shares_output)) in + step_meta.iter().enumerate() + { + let Some(step_state) = step_obj.get(step_id) else { + continue; + }; + let raw_status = step_state + .get("status") + .and_then(|v| v.as_str()) + .unwrap_or("pending"); + let mut status_normalized = normalize_progress_status(raw_status); + if *step_shares_output + && status_normalized == "Completed" + && has_step_share_marker(base, step_id) + { + status_normalized = "Shared".to_string(); + } + + let step_number = step_idx + 1; + let output_dir_candidate = + resolve_step_output_dir_for_base(base, step_number, step_id); + let expose_outputs = if *step_shares_output { + status_normalized == "Shared" || status_normalized == "Completed" + } else { + status_normalized == "Completed" || status_normalized == "Shared" + }; + let output_dir = if expose_outputs { + output_dir_candidate.map(|p| p.to_string_lossy().to_string()) + } else { + None + }; + let timestamp = + parse_progress_timestamp(step_state.get("completed_at")) + .or_else(|| { + parse_progress_timestamp(step_state.get("updated_at")) + }) + .or_else(|| { + parse_progress_timestamp(step_state.get("started_at")) + }) + .unwrap_or_else(|| Utc::now().timestamp()); + + let candidate = ParticipantStepStatus { + step_id: step_id.clone(), + status: status_normalized, + timestamp, + output_dir, + }; + if should_replace_step_status(steps_by_id.get(step_id), &candidate) { + steps_by_id.insert(step_id.clone(), candidate); + } + } + } + } + } + } + + // Final fallback: infer status from on-disk step output directories when + // status files are missing/lagging on a peer view. 
+ for (step_idx, (step_id, step_shares_output)) in step_meta.iter().enumerate() { + if steps_by_id.contains_key(step_id) { + continue; + } + let step_number = step_idx + 1; + for base in &flow_dirs { + let Some(output_dir_path) = + resolve_step_output_dir_for_base(base, step_number, step_id) + else { + continue; + }; + + // Ignore placeholder dirs that have no real output payload yet. + let has_payload = fs::read_dir(&output_dir_path) + .ok() + .map(|entries| { + entries.flatten().any(|entry| { + let path = entry.path(); + if !path.is_file() { + return false; + } + path.file_name() + .and_then(|n| n.to_str()) + .map(|name| name != "syft.pub.yaml") + .unwrap_or(false) + }) + }) + .unwrap_or(false); + if !has_payload { + continue; + } + + let is_shared = output_dir_path.join("syft.pub.yaml").exists(); + let inferred_status = if *step_shares_output { + if is_shared { + "Shared" + } else { + "Completed" + } + } else { + "Completed" + }; + + let inferred_ts = fs::metadata(&output_dir_path) + .ok() + .and_then(|m| m.modified().ok()) + .and_then(|mtime| mtime.duration_since(SystemTime::UNIX_EPOCH).ok()) + .and_then(|d| i64::try_from(d.as_secs()).ok()) + .unwrap_or_else(|| Utc::now().timestamp()); + + let candidate = ParticipantStepStatus { + step_id: step_id.clone(), + status: inferred_status.to_string(), + timestamp: inferred_ts, + output_dir: Some(output_dir_path.to_string_lossy().to_string()), + }; + if should_replace_step_status(steps_by_id.get(step_id), &candidate) { + steps_by_id.insert(step_id.clone(), candidate); + } + } + } + + let mut steps = Vec::new(); + for (step_id, _) in &step_meta { + if let Some(step_status) = steps_by_id.remove(step_id) { + steps.push(step_status); + } + } + + all_progress.push(ParticipantProgress { + email: participant.email.clone(), + role: participant.role.clone(), + steps, + }); + } + + Ok(all_progress) +} + +#[tauri::command] +pub async fn get_multiparty_participant_datasite_path( + session_id: String, + participant_email: String, 
+) -> Result { + { + let sessions = FLOW_SESSIONS.lock().map_err(|e| e.to_string())?; + let flow_state = sessions + .get(&session_id) + .ok_or_else(|| "Flow session not found".to_string())?; + let known = flow_state + .participants + .iter() + .any(|p| p.email.eq_ignore_ascii_case(&participant_email)); + if !known { + return Err(format!( + "Participant '{}' not part of session '{}'", + participant_email, session_id + )); + } + } + + let biovault_home = biovault::config::get_biovault_home() + .map_err(|e| format!("Failed to get BioVault home: {}", e))?; + + let direct = biovault_home.join("datasites").join(&participant_email); + if direct.exists() { + return Ok(direct.to_string_lossy().to_string()); + } + + if let Some(sandbox_root) = find_sandbox_root(&biovault_home) { + let sibling = sandbox_root + .join(&participant_email) + .join("datasites") + .join(&participant_email); + if sibling.exists() { + return Ok(sibling.to_string_lossy().to_string()); + } + } + + Ok(direct.to_string_lossy().to_string()) +} + +/// Get progress log entries from all participants (JSONL format) +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct LogEntry { + pub participant: String, + pub role: String, + pub timestamp: String, + pub event: String, + pub step_id: Option, + pub message: Option, +} + +#[tauri::command] +pub async fn get_participant_logs(session_id: String) -> Result, String> { + let (flow_name, my_email, participants) = { + let sessions = FLOW_SESSIONS.lock().map_err(|e| e.to_string())?; + let flow_state = sessions + .get(&session_id) + .ok_or_else(|| "Flow session not found".to_string())?; + ( + flow_state.flow_name.clone(), + flow_state.my_email.clone(), + flow_state.participants.clone(), + ) + }; + + let biovault_home = biovault::config::get_biovault_home() + .map_err(|e| format!("Failed to get BioVault home: {}", e))?; + + let mut all_logs = Vec::new(); + let mut seen = HashSet::new(); + + let mut push_log = |log: LogEntry| { + let key = format!( + 
"{}|{}|{}|{}", + log.participant, + log.event, + log.step_id.clone().unwrap_or_default(), + log.timestamp + ); + if seen.insert(key) { + all_logs.push(log); + } + }; + + for participant in &participants { + for progress_dir in participant_flow_dirs_for_viewer( + &biovault_home, + &my_email, + &participant.email, + &flow_name, + &session_id, + ) + .into_iter() + .map(|base| base.join("_progress")) + { + // Read canonical log.jsonl and legacy progress.json (both may exist + // and contain useful events). + let log_candidates = [ + progress_dir.join("log.jsonl"), + progress_dir.join("progress.json"), + ]; + for path in log_candidates.into_iter().filter(|p| p.exists()) { + if let Ok(content) = fs::read_to_string(&path) { + // JSONL format - one JSON object per line + for line in content.lines() { + if let Ok(entry) = serde_json::from_str::(line) { + push_log(LogEntry { + participant: participant.email.clone(), + role: participant.role.clone(), + timestamp: entry + .get("timestamp") + .and_then(|t| t.as_str()) + .unwrap_or("") + .to_string(), + event: entry + .get("event") + .and_then(|e| e.as_str()) + .unwrap_or("") + .to_string(), + step_id: entry + .get("step_id") + .and_then(|s| s.as_str()) + .or_else(|| entry.get("step").and_then(|s| s.as_str())) + .and_then(|s| { + let trimmed = s.trim(); + if trimmed.is_empty() + || trimmed.eq_ignore_ascii_case("null") + || trimmed.eq_ignore_ascii_case("undefined") + { + None + } else { + Some(trimmed.to_string()) + } + }), + message: entry + .get("message") + .and_then(|m| m.as_str()) + .map(|m| m.to_string()), + }); + } + } + } + } + + // Fallback: synthesize events from shared step status files. + // This keeps the activity log populated even when log.jsonl lags behind. 
+ if let Ok(entries) = fs::read_dir(&progress_dir) { + for entry in entries.flatten() { + let path = entry.path(); + if !path.is_file() { + continue; + } + let Some(name) = path.file_name().and_then(|n| n.to_str()) else { + continue; + }; + if name == "state.json" + || name == "log.jsonl" + || name == "progress.json" + || name == "syft.pub.yaml" + { + continue; + } + if !name.ends_with(".json") { + continue; + } + if let Ok(content) = fs::read_to_string(&path) { + if let Ok(status) = serde_json::from_str::(&content) { + let event = match status.status.as_str() { + "Shared" => "step_shared", + "Completed" => "step_completed", + _ => continue, + }; + let timestamp = Utc + .timestamp_opt(status.timestamp, 0) + .single() + .map(|dt| dt.to_rfc3339()) + .unwrap_or_default(); + push_log(LogEntry { + participant: participant.email.clone(), + role: if status.role.is_empty() { + participant.role.clone() + } else { + status.role.clone() + }, + timestamp, + event: event.to_string(), + step_id: Some(status.step_id.clone()), + message: None, + }); + } + } + } + } + } + } + + // Sort by timestamp descending (newest first) + all_logs.sort_by(|a, b| b.timestamp.cmp(&a.timestamp)); + + Ok(all_logs) +} + +#[tauri::command] +pub async fn get_multiparty_step_diagnostics( + session_id: String, + step_id: String, +) -> Result { + let (flow_name, my_email, participants) = { + let sessions = FLOW_SESSIONS.lock().map_err(|e| e.to_string())?; + let flow_state = sessions + .get(&session_id) + .ok_or_else(|| "Flow session not found".to_string())?; + ( + flow_state.flow_name.clone(), + flow_state.my_email.clone(), + flow_state.participants.clone(), + ) + }; + + let now_ms = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_millis() as u64; + let biovault_home = biovault::config::get_biovault_home() + .map_err(|e| format!("Failed to get BioVault home: {}", e))?; + + let mut channels = Vec::new(); + for base in participant_flow_dirs_for_viewer( + &biovault_home, + 
&my_email, + &my_email, + &flow_name, + &session_id, + ) { + let mpc_dir = base.join("_mpc"); + if !mpc_dir.exists() { + continue; + } + channels = collect_mpc_tcp_channel_diagnostics(&mpc_dir); + if !channels.is_empty() { + break; + } + } + + let mut all_emails: BTreeSet = participants + .iter() + .map(|p| p.email.clone()) + .filter(|e| !e.trim().is_empty()) + .collect(); + all_emails.insert(my_email.clone()); + + let mut peers = Vec::new(); + for email in all_emails { + let mut peer = MultipartyPeerTelemetryDiagnostics { + email: email.clone(), + telemetry_present: false, + mode: "unknown".to_string(), + mode_short: "unknown".to_string(), + status: "pending".to_string(), + updated_ms: None, + age_ms: None, + tx_packets: 0, + tx_bytes: 0, + tx_quic_packets: 0, + tx_ws_packets: 0, + tx_avg_send_ms: 0.0, + rx_packets: 0, + rx_bytes: 0, + rx_avg_write_ms: 0.0, + ws_fallbacks: 0, + }; + for path in hotlink_telemetry_candidates(&biovault_home, &email) { + if let Some(snapshot) = read_hotlink_telemetry(&path) { + peer.telemetry_present = true; + peer.mode = snapshot.mode.clone(); + peer.mode_short = short_hotlink_mode(&snapshot.mode).to_string(); + peer.updated_ms = snapshot.updated_ms; + peer.tx_packets = snapshot.tx_packets; + peer.tx_bytes = snapshot.tx_bytes; + peer.tx_quic_packets = snapshot.tx_quic_packets; + peer.tx_ws_packets = snapshot.tx_ws_packets; + peer.tx_avg_send_ms = snapshot.tx_avg_send_ms; + peer.rx_packets = snapshot.rx_packets; + peer.rx_bytes = snapshot.rx_bytes; + peer.rx_avg_write_ms = snapshot.rx_avg_write_ms; + peer.ws_fallbacks = snapshot.ws_fallbacks; + break; + } + } + peer.age_ms = peer + .updated_ms + .map(|updated| now_ms.saturating_sub(updated)); + peer.status = if peer.telemetry_present { + if peer.age_ms.unwrap_or(0) <= 15_000 { + "connected".to_string() + } else { + "stale".to_string() + } + } else { + "pending".to_string() + }; + peers.push(peer); + } + peers.sort_by(|a, b| a.email.cmp(&b.email)); + + Ok(MultipartyStepDiagnostics { + 
session_id, + step_id, + flow_name, + local_email: my_email, + generated_at_ms: now_ms, + channels, + peers, + }) +} + +#[tauri::command] +pub async fn get_multiparty_step_logs( + state: tauri::State<'_, AppState>, + session_id: String, + step_id: String, + lines: Option, +) -> Result { + let (run_id, work_dir, flow_name, my_email, flow_state_snapshot) = { + let sessions = FLOW_SESSIONS.lock().map_err(|e| e.to_string())?; + let flow_state = sessions + .get(&session_id) + .ok_or_else(|| "Flow session not found".to_string())?; + ( + flow_state.run_id, + flow_state.work_dir.clone(), + flow_state.flow_name.clone(), + flow_state.my_email.clone(), + flow_state.clone(), + ) + }; + + let lines = lines.unwrap_or(200).clamp(20, 2000); + let mut sections: Vec = Vec::new(); + let readiness = collect_step_readiness_blockers(&flow_state_snapshot, &step_id); + if !readiness.is_empty() { + sections.push(format!("[Readiness Debug]\n{}", readiness.join("\n"))); + } + + // 1) Private per-step logs (local-only, never synced). + let private_log_path = get_private_step_log_path(&session_id, &step_id)?; + if private_log_path.exists() { + let private_tail = read_tail_lines(&private_log_path, lines)?; + if !private_tail.trim().is_empty() { + sections.push(format!("[Private Step Log]\n{}", private_tail)); + } + } + + // 1b) Progress event stream for this local participant (JSONL under shared _progress). + // This captures step_started/step_completed/step_shared even when execution was backend-driven. 
+ let biovault_home = biovault::config::get_biovault_home() + .map_err(|e| format!("Failed to get BioVault home: {}", e))?; + let mut progress_candidates: Vec = Vec::new(); + for base in participant_flow_dirs_for_viewer( + &biovault_home, + &my_email, + &my_email, + &flow_name, + &session_id, + ) { + progress_candidates.push(base.join("_progress").join("log.jsonl")); + progress_candidates.push(base.join("_progress").join("progress.json")); + } + progress_candidates.sort(); + progress_candidates.dedup(); + for progress_path in progress_candidates { + if !progress_path.exists() { + continue; + } + if let Ok(content) = fs::read_to_string(&progress_path) { + let mut matched: Vec = Vec::new(); + let mut unscoped: Vec = Vec::new(); + for line in content.lines() { + if line.trim().is_empty() { + continue; + } + let Ok(entry) = serde_json::from_str::(line) else { + continue; + }; + let entry_step = entry + .get("step_id") + .or_else(|| entry.get("step")) + .and_then(|v| v.as_str()) + .unwrap_or(""); + let timestamp = entry + .get("timestamp") + .and_then(|v| v.as_str()) + .unwrap_or(""); + let role = entry.get("role").and_then(|v| v.as_str()).unwrap_or(""); + let event = entry.get("event").and_then(|v| v.as_str()).unwrap_or(""); + let message = entry.get("message").and_then(|v| v.as_str()).unwrap_or(""); + let mut rendered = format!("{} [{}] {}", timestamp, role, event); + if !message.is_empty() { + rendered.push_str(": "); + rendered.push_str(message); + } + if entry_step == step_id { + matched.push(rendered); + } else if entry_step.is_empty() { + unscoped.push(rendered); + } + } + if !matched.is_empty() { + let selected: Vec = matched + .into_iter() + .rev() + .take(lines) + .collect::>() + .into_iter() + .rev() + .collect(); + sections.push(format!( + "[Progress Log: {}]\n{}", + progress_path.display(), + selected.join("\n") + )); + break; + } else if !unscoped.is_empty() { + let selected: Vec = unscoped + .into_iter() + .rev() + .take(lines) + .collect::>() + 
.into_iter() + .rev() + .collect(); + sections.push(format!( + "[Progress Log (unscoped): {}]\n{}", + progress_path.display(), + selected.join("\n") + )); + break; + } + } + } + + // 1c) Local MPC transport diagnostics (important for secure_aggregate visibility). + for base in participant_flow_dirs_for_viewer( + &biovault_home, + &my_email, + &my_email, + &flow_name, + &session_id, + ) { + let mpc_dir = base.join("_mpc"); + if !mpc_dir.exists() { + continue; + } + + let transport_log = mpc_dir.join("file_transport.log"); + if transport_log.exists() { + let transport_tail = read_tail_lines(&transport_log, lines)?; + if !transport_tail.trim().is_empty() { + sections.push(format!( + "[MPC Transport Log: {}]\n{}", + transport_log.display(), + transport_tail + )); + } + } + + let request_count = count_files_recursive(&mpc_dir, ".request"); + let response_count = count_files_recursive(&mpc_dir, ".response"); + if request_count > 0 || response_count > 0 { + sections.push(format!( + "[MPC File Progress]\nrequests={} responses={}", + request_count, response_count + )); + } + + let tcp_status = collect_mpc_tcp_marker_status(&mpc_dir); + if !tcp_status.is_empty() { + sections.push(format!( + "[MPC TCP Proxy Status: {}]\n{}", + mpc_dir.display(), + tcp_status.join("\n") + )); + } + } + + // 2) Fallback to flow.log (run-local execution log), filtered by step id. + let mut log_candidates: Vec = Vec::new(); + if let Some(wd) = work_dir { + log_candidates.push(wd.join("flow.log")); + } + if let Some(run_id) = run_id { + let biovault_db = state.biovault_db.lock().map_err(|e| e.to_string())?; + if let Some(run) = biovault_db + .get_flow_run(run_id) + .map_err(|e| e.to_string())? 
+ { + if let Some(results_dir) = run.results_dir.as_ref() { + log_candidates.push(PathBuf::from(results_dir).join("flow.log")); + } + log_candidates.push(PathBuf::from(run.work_dir).join("flow.log")); + } + } + log_candidates.sort(); + log_candidates.dedup(); + + for log_path in log_candidates { + if !log_path.exists() { + continue; + } + if let Ok(text) = fs::read_to_string(&log_path) { + let selected = select_step_log_lines(&text, &step_id, lines); + if !selected.trim().is_empty() { + sections.push(format!("[Run Log: {}]\n{}", log_path.display(), selected)); + break; + } + } + } + + if step_id == "secure_aggregate" { + if let Ok(desktop_log) = env::var("BIOVAULT_DESKTOP_LOG_FILE") { + let desktop_log_path = PathBuf::from(desktop_log); + if desktop_log_path.exists() { + let raw_tail = read_tail_lines(&desktop_log_path, lines.saturating_mul(6))?; + if !raw_tail.trim().is_empty() { + let filtered: Vec = raw_tail + .lines() + .filter(|line| { + let lc = line.to_ascii_lowercase(); + lc.contains("syqure") + || lc.contains("hotlink") + || lc.contains("tcp proxy") + || lc.contains("sequre_transport") + || lc.contains("sequre_communication_port") + }) + .map(|line| line.to_string()) + .collect(); + if !filtered.is_empty() { + let selected: Vec = filtered + .into_iter() + .rev() + .take(lines) + .collect::>() + .into_iter() + .rev() + .collect(); + sections.push(format!( + "[Desktop Syqure Log: {}]\n{}", + desktop_log_path.display(), + selected.join("\n") + )); + } + } + } + } + } + + if sections.is_empty() { + return Ok("No run logs available yet.".to_string()); + } + + Ok(sections.join("\n\n")) +} + +#[tauri::command] +pub async fn set_step_auto_run( + session_id: String, + step_id: String, + auto_run: bool, +) -> Result<(), String> { + let mut sessions = FLOW_SESSIONS.lock().map_err(|e| e.to_string())?; + let state = sessions + .get_mut(&session_id) + .ok_or_else(|| "Flow session not found".to_string())?; + + let step = state + .steps + .iter_mut() + .find(|s| s.id == 
step_id) + .ok_or_else(|| "Step not found".to_string())?; + + step.auto_run = auto_run; + let _ = persist_multiparty_state(state); + Ok(()) +} + +#[tauri::command] +pub async fn run_flow_step( + state: tauri::State<'_, AppState>, + session_id: String, + step_id: String, +) -> Result { + let ( + work_dir, + step_number, + step_numbers_by_id, + flow_name, + my_email, + participants, + input_overrides, + module_path, + module_ref, + with_bindings, + flow_spec, + syqure_port_base, + all_steps_snapshot, + ) = { + let mut sessions = FLOW_SESSIONS.lock().map_err(|e| e.to_string())?; + let flow_state = sessions + .get_mut(&session_id) + .ok_or_else(|| "Flow session not found".to_string())?; + + // Get step info and check if it can run + let (step_deps, step_status, is_my_action, module_path, module_ref, with_bindings) = { + let step = flow_state + .steps + .iter() + .find(|s| s.id == step_id) + .ok_or_else(|| "Step not found".to_string())?; + ( + step.depends_on.clone(), + step.status.clone(), + step.my_action, + step.module_path.clone(), + step.module_ref.clone(), + step.with_bindings.clone(), + ) + }; + + if !is_my_action { + return Err("This step is not your action".to_string()); + } + + if step_status == StepStatus::Failed { + if let Some(s) = flow_state.steps.iter_mut().find(|s| s.id == step_id) { + println!( + "[Multiparty] Retrying failed step '{}' — resetting to Ready", + step_id + ); + s.status = StepStatus::Ready; + append_private_step_log(&session_id, &step_id, "step_retry"); + } + } else if step_status != StepStatus::Ready && step_status != StepStatus::Pending { + return Err(format!( + "Step is not ready to run (status: {:?})", + step_status + )); + } + + // Always validate dependencies before running (including cross-participant deps). 
+ for dep_id in &step_deps { + if !is_dependency_complete(flow_state, dep_id) { + return Err(format!( + "Cannot run step '{}': dependency '{}' is not satisfied yet", + step_id, dep_id + )); + } + } + + // Get step number (1-indexed) for path construction + let step_number = flow_state + .steps + .iter() + .position(|s| s.id == step_id) + .map(|i| i + 1) + .unwrap_or(0); + + let step_numbers_by_id = flow_state + .steps + .iter() + .enumerate() + .map(|(i, s)| (s.id.clone(), i + 1)) + .collect::>(); + + let all_steps_snapshot = flow_state.steps.clone(); + + // Pre-run with-binding readiness check: do not start step execution until + // required upstream artifacts are actually readable. + if let Some(step_ref) = flow_state.steps.iter().find(|s| s.id == step_id) { + let (input_status, waiting_on, waiting_reason) = check_step_input_readiness( + flow_state, + step_ref, + &step_numbers_by_id, + &all_steps_snapshot, + ); + if input_status == StepStatus::WaitingForInputs { + if let Some(step_mut) = flow_state.steps.iter_mut().find(|s| s.id == step_id) { + step_mut.status = StepStatus::WaitingForInputs; + step_mut.input_waiting_on = waiting_on.clone(); + step_mut.input_waiting_reason = waiting_reason.clone(); + } + let _ = persist_multiparty_state(flow_state); + return Err(waiting_reason.unwrap_or_else(|| { + if waiting_on.is_empty() { + format!("Step '{}' is waiting for required shared inputs", step_id) + } else { + format!( + "Step '{}' is waiting for shared inputs from {}", + step_id, + waiting_on.join(", ") + ) + } + })); + } + } + + let step = flow_state + .steps + .iter_mut() + .find(|s| s.id == step_id) + .ok_or_else(|| "Step not found".to_string())?; + + step.status = StepStatus::Running; + step.input_waiting_on.clear(); + step.input_waiting_reason = None; + append_private_step_log(&session_id, &step_id, "step_started"); + if let Some(ref work_dir) = flow_state.work_dir { + let progress_dir = get_progress_path(work_dir); + let _ = fs::create_dir_all(&progress_dir); + 
append_progress_log( + &progress_dir, + "step_started", + Some(&step_id), + &flow_state.my_role, + ); + write_progress_state( + &progress_dir, + &flow_state.my_role, + "step_started", + Some(&step_id), + "Running", + ); + } + let _ = persist_multiparty_state(flow_state); + + ( + flow_state.work_dir.clone(), + step_number, + step_numbers_by_id, + flow_state.flow_name.clone(), + flow_state.my_email.clone(), + flow_state.participants.clone(), + flow_state.input_overrides.clone(), + module_path, + module_ref, + with_bindings, + flow_state.flow_spec.clone(), + flow_state.syqure_port_base, + all_steps_snapshot, + ) + }; + + // Step output path: {flow_path}/{step_number}-{step_id}/ + let step_output_dir = work_dir + .as_ref() + .map(|d| canonicalize_step_dir_name(d, step_number, &step_id)); + + if let Some(ref dir) = step_output_dir { + fs::create_dir_all(dir).map_err(|e| format!("Failed to create output dir: {}", e))?; + } + + if step_id == "generate" { + let output_file = step_output_dir + .as_ref() + .map(|d| d.join("numbers.json")) + .ok_or_else(|| "No output directory".to_string())?; + + let numbers: Vec = (0..5).map(|_| rand::random::() % 100 + 1).collect(); + let sum: i32 = numbers.iter().sum(); + + let result = serde_json::json!({ + "session_id": session_id, + "numbers": numbers, + "sum": sum + }); + + fs::write(&output_file, serde_json::to_string_pretty(&result).unwrap()) + .map_err(|e| format!("Failed to write output: {}", e))?; + } else if step_id == "aggregate" { + // Get flow state to find contributors + let biovault_home = biovault::config::get_biovault_home() + .map_err(|e| format!("Failed to get BioVault home: {}", e))?; + + let output_file = step_output_dir + .as_ref() + .map(|d| d.join("result.json")) + .ok_or_else(|| "No output directory".to_string())?; + + let mut all_numbers: Vec = Vec::new(); + let mut contributions: Vec = Vec::new(); + + // Read contributions from each contributor's synced datasite. 
+ // Prefer legacy "2-share_contribution/numbers.json" when present, and + // fall back to "1-generate/numbers.json" (share-as-part-of-step mode). + for participant in &participants { + // Skip non-contributors (aggregator doesn't contribute) + if participant.role == "aggregator" { + continue; + } + + let synced_base = biovault_home + .join("datasites") + .join(&participant.email) + .join("shared") + .join("flows") + .join(&flow_name) + .join(&session_id); + let sandbox_base = biovault_home.parent().map(|parent| { + parent + .join(&participant.email) + .join("datasites") + .join(&participant.email) + .join("shared") + .join("flows") + .join(&flow_name) + .join(&session_id) + }); + let path_candidates = [ + synced_base.join("1-generate").join("numbers.json"), + synced_base + .join("2-share_contribution") + .join("numbers.json"), + sandbox_base + .as_ref() + .map(|p| p.join("1-generate").join("numbers.json")) + .unwrap_or_default(), + sandbox_base + .as_ref() + .map(|p| p.join("2-share_contribution").join("numbers.json")) + .unwrap_or_default(), + ]; + let contributor_step_path = match path_candidates.iter().find(|p| p.exists()) { + Some(path) => path.clone(), + None => continue, // Skip this contributor if no data found + }; + + if let Ok(content) = fs::read_to_string(&contributor_step_path) { + if let Ok(data) = serde_json::from_str::(&content) { + if let Some(nums) = data.get("numbers").and_then(|n| n.as_array()) { + for n in nums { + if let Some(i) = n.as_i64() { + all_numbers.push(i as i32); + } + } + contributions.push(serde_json::json!({ + "from": participant.email, + "data": data + })); + } + } + } + } + + let total_sum: i32 = all_numbers.iter().sum(); + + let result = serde_json::json!({ + "session_id": session_id, + "contributions": contributions, + "all_numbers": all_numbers, + "total_sum": total_sum, + "count": all_numbers.len() + }); + + fs::write(&output_file, serde_json::to_string_pretty(&result).unwrap()) + .map_err(|e| format!("Failed to write 
output: {}", e))?; + } else if module_ref.is_some() || module_path.is_some() { + // ---- Generic module execution path (replaces all hardcoded step handlers) ---- + let output_dir = step_output_dir + .as_ref() + .ok_or_else(|| "No output directory".to_string())?; + let module_dir = + resolve_module_directory(&flow_name, module_path.as_deref(), module_ref.as_deref()) + .ok_or_else(|| { + format!("Failed to resolve module directory for step '{}'", step_id) + })?; + validate_module_assets_exist(&module_dir).map_err(|e| { + format!("Step '{}' failed preflight: {}", step_id, e) + })?; + + let biovault_home = biovault::config::get_biovault_home() + .map_err(|e| format!("Failed to get BioVault home: {}", e))?; + + let flow_spec_ref = flow_spec + .as_ref() + .ok_or_else(|| "Flow spec not stored in session state".to_string())?; + + let step_args = resolve_with_bindings( + &with_bindings, + &input_overrides, + flow_spec_ref, + &flow_name, + &session_id, + &my_email, + &biovault_home, + &step_numbers_by_id, + &all_steps_snapshot, + work_dir.as_ref().ok_or("No work directory")?, + &participants, + )?; + + append_private_step_log( + &session_id, + &step_id, + &format!( + "generic_execute: module={} args={:?}", + module_dir.display(), + step_args + ), + ); + + let party_emails: Vec = participants.iter().map(|p| p.email.clone()).collect(); + + let dynamic_ctx = run_dynamic::DynamicExecutionContext { + current_datasite: Some(my_email.clone()), + datasites_override: Some(party_emails.clone()), + syftbox_data_dir: Some(biovault_home.to_string_lossy().to_string()), + run_id: Some(session_id.clone()), + flow_name: Some(flow_name.clone()), + syqure_port_base, + tauri_context: true, + }; + + let party_id_idx = party_emails + .iter() + .position(|e| e == &my_email) + .unwrap_or(0); + append_private_step_log( + &session_id, + &step_id, + &format!( + "syqure_coordination: session_id={} party_id={}/{} email={} port_base={} backend={} module_dir={} 
diag_file=/tmp/biovault-syqure-diag-{}-p{}.log", + session_id, + party_id_idx, + party_emails.len(), + my_email, + syqure_port_base.map(|b| b.to_string()).unwrap_or_else(|| "none".to_string()), + env::var("BV_SYFTBOX_BACKEND").unwrap_or_else(|_| "unset".to_string()), + module_dir.display(), + session_id, + party_id_idx, + ), + ); + + eprintln!("[tauri-trace] run_flow_step calling execute_dynamic step={} party={}/{} pid={} thread={:?}", + step_id, party_id_idx, party_emails.len(), std::process::id(), std::thread::current().id()); + // Important: pass party/session context through task-local scope. + // Avoid reintroducing process-global env mutation here; concurrent + // Tauri parties can race and produce non-deterministic Syqure wiring. + let run_result = run_dynamic::with_execution_context( + dynamic_ctx, + run_dynamic::execute_dynamic( + &module_dir.to_string_lossy(), + step_args, + false, + false, + Some(output_dir.to_string_lossy().to_string()), + run_dynamic::RunSettings::default(), + ), + ) + .await + .map_err(|e| format!("Step '{}' failed: {}", step_id, e)); + eprintln!( + "[tauri-trace] execute_dynamic returned step={} party={} result={:?}", + step_id, + party_id_idx, + run_result.as_ref().map(|_| "ok").map_err(|e| e.clone()) + ); + + if let Err(err) = run_result { + append_private_step_log(&session_id, &step_id, &format!("step_failed: {}", err)); + + let mut sessions = FLOW_SESSIONS.lock().map_err(|e| e.to_string())?; + let mut terminal_update = None; + if let Some(flow_state) = sessions.get_mut(&session_id) { + if let Some(step) = flow_state.steps.iter_mut().find(|s| s.id == step_id) { + step.status = StepStatus::Failed; + step.input_waiting_on.clear(); + step.input_waiting_reason = None; + } + flow_state.status = FlowSessionStatus::Failed; + if let Some(ref work_dir) = flow_state.work_dir { + let progress_dir = get_progress_path(work_dir); + let _ = fs::create_dir_all(&progress_dir); + let shared_status = SharedStepStatus { + step_id: step_id.clone(), + 
role: flow_state.my_role.clone(), + status: "Failed".to_string(), + timestamp: Utc::now().timestamp(), + }; + let status_file = + progress_dir.join(format!("{}_{}.json", flow_state.my_role, step_id)); + if let Ok(json) = serde_json::to_string_pretty(&shared_status) { + let _ = fs::write(&status_file, json); + } + append_progress_log( + &progress_dir, + "step_failed", + Some(&step_id), + &flow_state.my_role, + ); + write_progress_state( + &progress_dir, + &flow_state.my_role, + "step_failed", + Some(&step_id), + "Failed", + ); + } + terminal_update = collect_terminal_run_update(flow_state); + let _ = persist_multiparty_state(flow_state); + } + drop(sessions); + apply_terminal_run_update(state.inner(), terminal_update); + return Err(err); + } + } + + let mut sessions = FLOW_SESSIONS.lock().map_err(|e| e.to_string())?; + let flow_state = sessions + .get_mut(&session_id) + .ok_or_else(|| "Flow session not found".to_string())?; + + let step = flow_state + .steps + .iter_mut() + .find(|s| s.id == step_id) + .ok_or_else(|| "Step not found".to_string())?; + + step.status = StepStatus::Completed; + step.output_dir = step_output_dir.clone(); + step.input_waiting_on.clear(); + step.input_waiting_reason = None; + append_private_step_log(&session_id, &step_id, "step_completed"); + + // Save step status to shared _progress folder for cross-client syncing + if let Some(ref work_dir) = flow_state.work_dir { + let progress_dir = get_progress_path(work_dir); + let _ = fs::create_dir_all(&progress_dir); + let shared_status = SharedStepStatus { + step_id: step_id.clone(), + role: flow_state.my_role.clone(), + status: "Completed".to_string(), + timestamp: Utc::now().timestamp(), + }; + let status_file = progress_dir.join(format!("{}_{}.json", flow_state.my_role, step_id)); + if let Ok(json) = serde_json::to_string_pretty(&shared_status) { + let _ = fs::write(&status_file, json); + } + // Also append to progress.json log + append_progress_log( + &progress_dir, + "step_completed", + 
Some(&step_id), + &flow_state.my_role, + ); + write_progress_state( + &progress_dir, + &flow_state.my_role, + "step_completed", + Some(&step_id), + "Completed", + ); + } + + let completed_step = step.clone(); + + // Update dependent steps: if all their dependencies are now met, mark them Ready + update_dependent_steps(flow_state, &step_id); + + let terminal_update = collect_terminal_run_update(flow_state); + let _ = persist_multiparty_state(flow_state); + + drop(sessions); + apply_terminal_run_update(state.inner(), terminal_update); + + Ok(completed_step) +} + +#[tauri::command] +pub async fn share_step_outputs( + state: tauri::State<'_, AppState>, + session_id: String, + step_id: String, +) -> Result<(), String> { + let (output_dir, share_to_emails, my_email, thread_id, flow_name, step_name, participants) = { + let mut sessions = FLOW_SESSIONS.lock().map_err(|e| e.to_string())?; + let flow_state = sessions + .get_mut(&session_id) + .ok_or_else(|| "Flow session not found".to_string())?; + + let (output_dir, step_name, share_to) = { + let step = flow_state + .steps + .iter_mut() + .find(|s| s.id == step_id) + .ok_or_else(|| "Step not found".to_string())?; + + if step.status != StepStatus::Completed { + return Err("Step must be completed before sharing".to_string()); + } + + if !step.shares_output { + return Err("This step does not share outputs".to_string()); + } + + step.status = StepStatus::Sharing; + append_private_step_log(&session_id, &step_id, "step_sharing_started"); + + ( + step.output_dir.clone(), + step.name.clone(), + step.share_to.clone(), + ) + }; + let (groups, default_to_actual) = flow_state + .flow_spec + .as_ref() + .map(|spec| build_group_map_from_participants(&flow_state.participants, spec)) + .unwrap_or_default(); + let datasites_order: Vec = flow_state + .flow_spec + .as_ref() + .and_then(|spec| spec.get("inputs")) + .and_then(|i| i.get("datasites")) + .and_then(|d| d.get("default")) + .and_then(|arr| arr.as_array()) + .map(|arr| { + arr.iter() 
+ .filter_map(|v| v.as_str().map(|s| s.to_string())) + .collect::>() + }) + .unwrap_or_default() + .into_iter() + .map(|email| default_to_actual.get(&email).cloned().unwrap_or(email)) + .collect::>(); + let share_to_emails = resolve_share_recipients( + &share_to, + &flow_state.participants, + &flow_state.my_email, + &datasites_order, + &groups, + ); + let _ = persist_multiparty_state(flow_state); + + ( + output_dir, + share_to_emails, + flow_state.my_email.clone(), + flow_state.thread_id.clone(), + flow_state.flow_name.clone(), + step_name, + flow_state.participants.clone(), + ) + }; + + let output_dir = output_dir.ok_or_else(|| "No output directory".to_string())?; + + // Create syft.pub.yaml in output directory to enable SyftBox sync + create_syft_pub_yaml(&output_dir, &my_email, &share_to_emails)?; + + let terminal_update = { + let mut sessions = FLOW_SESSIONS.lock().map_err(|e| e.to_string())?; + let flow_state = sessions + .get_mut(&session_id) + .ok_or_else(|| "Flow session not found".to_string())?; + + let step = flow_state + .steps + .iter_mut() + .find(|s| s.id == step_id) + .ok_or_else(|| "Step not found".to_string())?; + + step.status = StepStatus::Shared; + step.outputs_shared = true; + append_private_step_log(&session_id, &step_id, "step_shared"); + + // Save step status to shared _progress folder for cross-client syncing + if let Some(ref work_dir) = flow_state.work_dir { + let progress_dir = get_progress_path(work_dir); + let _ = fs::create_dir_all(&progress_dir); + let shared_status = SharedStepStatus { + step_id: step_id.clone(), + role: flow_state.my_role.clone(), + status: "Shared".to_string(), + timestamp: Utc::now().timestamp(), + }; + let status_file = progress_dir.join(format!("{}_{}.json", flow_state.my_role, step_id)); + if let Ok(json) = serde_json::to_string_pretty(&shared_status) { + let _ = fs::write(&status_file, json); + } + // Also append to progress.json log + append_progress_log( + &progress_dir, + "step_shared", + Some(&step_id), + 
&flow_state.my_role, + ); + write_progress_state( + &progress_dir, + &flow_state.my_role, + "step_shared", + Some(&step_id), + "Shared", + ); + } + + // Update dependent steps: if all their dependencies are now met, mark them Ready + update_dependent_steps(flow_state, &step_id); + let terminal_update = collect_terminal_run_update(flow_state); + let _ = persist_multiparty_state(flow_state); + terminal_update + }; + + apply_terminal_run_update(state.inner(), terminal_update); + + // Sharing outputs should also publish a chat artifact message for flow participants. + if !thread_id.trim().is_empty() { + let _ = publish_step_outputs_message( + &session_id, + &step_id, + &output_dir, + &thread_id, + &flow_name, + &my_email, + &step_name, + &participants, + true, + )?; + } + + Ok(()) +} + +#[tauri::command] +pub async fn get_step_output_files( + session_id: String, + step_id: String, +) -> Result, String> { + let sessions = FLOW_SESSIONS.lock().map_err(|e| e.to_string())?; + let flow_state = sessions + .get(&session_id) + .ok_or_else(|| "Flow session not found".to_string())?; + + let step = flow_state + .steps + .iter() + .find(|s| s.id == step_id) + .ok_or_else(|| "Step not found".to_string())?; + + let output_dir = step + .output_dir + .as_ref() + .ok_or_else(|| "No output directory".to_string())?; + + let mut files = Vec::new(); + if output_dir.exists() { + for entry in fs::read_dir(output_dir).map_err(|e| e.to_string())? 
{ + let entry = entry.map_err(|e| e.to_string())?; + files.push(entry.path().to_string_lossy().to_string()); + } + } + + Ok(files) +} + +#[tauri::command] +pub async fn receive_flow_step_outputs( + _state: tauri::State<'_, AppState>, + session_id: String, + step_id: String, + from_role: String, + files: HashMap>, +) -> Result<(), String> { + let mut sessions = FLOW_SESSIONS.lock().map_err(|e| e.to_string())?; + let flow_state = sessions + .get_mut(&session_id) + .ok_or_else(|| "Flow session not found".to_string())?; + + // Find step number + let step_number = flow_state + .steps + .iter() + .position(|s| s.id == step_id) + .map(|i| i + 1) + .unwrap_or(0); + + // Create inputs directory: {flow_path}/_inputs/{step_number}-{step_id}/{from_role}/ + let inputs_dir = flow_state + .work_dir + .as_ref() + .map(|d| { + d.join("_inputs") + .join(format!("{}-{}", step_number, step_id)) + .join(&from_role) + }) + .ok_or_else(|| "No work directory".to_string())?; + + fs::create_dir_all(&inputs_dir).map_err(|e| format!("Failed to create inputs dir: {}", e))?; + + for (filename, content) in files { + let file_path = inputs_dir.join(&filename); + fs::write(&file_path, content) + .map_err(|e| format!("Failed to write file {}: {}", filename, e))?; + } + + for step in &mut flow_state.steps { + if step.status == StepStatus::WaitingForInputs { + step.status = StepStatus::Ready; + } + } + let _ = persist_multiparty_state(flow_state); + + Ok(()) +} + +/// Build a map of group name -> list of emails from participants +/// Also builds groups based on common role prefixes (e.g., contributor1, contributor2 -> contributors) +/// Returns (groups, default_to_actual_map) where default_to_actual_map maps default datasite emails to actual participant emails +fn build_group_map_from_participants( + participants: &[FlowParticipant], + flow_spec: &serde_json::Value, +) -> (HashMap>, HashMap) { + let spec_root = flow_spec_root(flow_spec); + let mut groups: HashMap> = HashMap::new(); + let mut 
default_to_actual: HashMap = HashMap::new(); + + let all_emails: Vec = participants.iter().map(|p| p.email.clone()).collect(); + groups.insert("all".to_string(), all_emails.clone()); + + // Build role-based groups first (robust fallback when flow datasite groups are unavailable). + let mut role_groups: HashMap> = HashMap::new(); + for p in participants { + role_groups + .entry(p.role.clone()) + .or_default() + .push(p.email.clone()); + + let base_role = p.role.trim_end_matches(|c: char| c.is_ascii_digit()); + if base_role != p.role { + let plural_role = format!("{}s", base_role); + role_groups + .entry(plural_role) + .or_default() + .push(p.email.clone()); + } + } + + // Parse default datasite list from canonical flow schema. + let default_datasites: Vec = spec_root + .get("inputs") + .and_then(|i| i.get("datasites")) + .and_then(|d| d.get("default")) + .and_then(|arr| arr.as_array()) + .map(|arr| { + arr.iter() + .filter_map(|v| v.as_str().map(|s| s.to_string())) + .collect() + }) + .or_else(|| { + spec_root + .get("datasites") + .and_then(|d| d.get("all")) + .and_then(|arr| arr.as_array()) + .map(|arr| { + arr.iter() + .filter_map(|v| v.as_str().map(|s| s.to_string())) + .collect() + }) + }) + .unwrap_or_default(); + + // Default mapping: + // 1) direct literal-email matches + // 2) stable index fallback + for (i, default_email) in default_datasites.iter().enumerate() { + if let Some(p) = participants.iter().find(|p| p.email == *default_email) { + default_to_actual.insert(default_email.clone(), p.email.clone()); + } else if let Some(p) = participants.get(i) { + default_to_actual.insert(default_email.clone(), p.email.clone()); + } + } + + // Parse explicit datasite groups from flow spec when available. 
+ if let Some(spec_groups) = spec_root + .get("datasites") + .and_then(|d| d.get("groups")) + .and_then(|g| g.as_object()) + { + for (group_name, group_def) in spec_groups { + let includes = group_def + .get("include") + .and_then(|v| v.as_array()) + .cloned() + .unwrap_or_default(); + + let fallback_group_members = role_groups.get(group_name).cloned().unwrap_or_default(); + let mut resolved_members: Vec = Vec::new(); + + for (include_idx, include_item) in includes.iter().enumerate() { + let Some(token) = include_item.as_str() else { + continue; + }; + let trimmed = token.trim(); + + if trimmed == "{datasites[*]}" || trimmed.eq_ignore_ascii_case("all") { + resolved_members.extend(all_emails.clone()); + continue; + } + + if trimmed.contains('@') { + // Either a real email or a default placeholder that already looks like email. + let mapped = default_to_actual + .get(trimmed) + .cloned() + .unwrap_or_else(|| trimmed.to_string()); + resolved_members.push(mapped); + continue; + } + + if trimmed.starts_with("{datasites[") && trimmed.ends_with("]}") { + let idx_str = &trimmed["{datasites[".len()..trimmed.len() - 2]; + if let Ok(idx) = idx_str.parse::() { + if let Some(default_email) = default_datasites.get(idx) { + if let Some(actual) = default_to_actual.get(default_email) { + resolved_members.push(actual.clone()); + continue; + } + } + } + } + + if let Some(mapped_group) = role_groups.get(trimmed) { + resolved_members.extend(mapped_group.clone()); + continue; + } + + // Ambiguous include token; preserve ordering against role-group fallback. + if let Some(member) = fallback_group_members.get(include_idx) { + resolved_members.push(member.clone()); + } + } + + if !resolved_members.is_empty() { + resolved_members.sort(); + resolved_members.dedup(); + groups.insert(group_name.clone(), resolved_members); + } + } + } + + // Merge role groups where spec groups did not define them. 
+ for (role, members) in role_groups { + groups.entry(role).or_insert(members); + } + + if !groups.contains_key("contributors") { + let inferred_contributors: Vec = participants + .iter() + .filter(|p| { + let role = p.role.to_ascii_lowercase(); + role.starts_with("client") || role.starts_with("contributor") + }) + .map(|p| p.email.clone()) + .collect(); + if !inferred_contributors.is_empty() { + groups.insert("contributors".to_string(), inferred_contributors.clone()); + groups + .entry("clients".to_string()) + .or_insert(inferred_contributors); + } + } + + if let Some(contributors) = groups.get("contributors").cloned() { + groups.insert("clients".to_string(), contributors); + } + + // Legacy compatibility: allow old flow targets like client1@sandbox.local + // and aggregator@sandbox.local to resolve to runtime participant emails. + let aggregator_email = participants + .iter() + .find(|p| p.role.eq_ignore_ascii_case("aggregator")) + .map(|p| p.email.clone()) + .or_else(|| { + groups + .get("aggregator") + .and_then(|members| members.first()) + .cloned() + }); + if let Some(agg) = aggregator_email { + for alias in [ + "aggregator", + "aggregator@sandbox.local", + "aggregator@openmined.org", + ] { + default_to_actual.insert(alias.to_string(), agg.clone()); + default_to_actual.insert(alias.to_ascii_lowercase(), agg.clone()); + } + } + + let client_like: Vec = participants + .iter() + .filter(|p| { + let role = p.role.to_ascii_lowercase(); + role == "clients" + || role == "contributors" + || role.starts_with("client") + || role.starts_with("contributor") + }) + .map(|p| p.email.clone()) + .collect(); + for (idx, email) in client_like.iter().enumerate() { + let n = idx + 1; + for alias in [ + format!("client{}", n), + format!("client{}@sandbox.local", n), + format!("client{}@openmined.org", n), + format!("contributor{}", n), + format!("contributor{}@sandbox.local", n), + format!("contributor{}@openmined.org", n), + ] { + default_to_actual.insert(alias.clone(), 
email.clone()); + default_to_actual.insert(alias.to_ascii_lowercase(), email.clone()); + } + } + + println!( + "[Multiparty] build_group_map_from_participants: groups={:?}, default_to_actual={:?}", + groups, default_to_actual + ); + + (groups, default_to_actual) +} + +/// Extract share recipients from step.share[*].read. +/// Accepts both: +/// - share..permissions.read (canonical flow schema) +/// - share..read (flattened schema variants) +fn extract_share_to(step: &serde_json::Value) -> Vec { + let mut share_to = Vec::new(); + + if let Some(share_block) = step.get("share").and_then(|s| s.as_object()) { + for (_share_name, share_def) in share_block { + let read_arr = share_def + .get("permissions") + .and_then(|perms| perms.get("read")) + .and_then(|r| r.as_array()) + .or_else(|| share_def.get("read").and_then(|r| r.as_array())); + + if let Some(read_arr) = read_arr { + for reader in read_arr { + if let Some(target) = reader.as_str() { + if !share_to.iter().any(|existing| existing == target) { + share_to.push(target.to_string()); + } + } + } + } + } + } + + share_to +} + +fn resolve_share_recipients( + raw_targets: &[String], + participants: &[FlowParticipant], + my_email: &str, + datasites_order: &[String], + groups: &HashMap>, +) -> Vec { + let mut resolved: HashSet = HashSet::new(); + + for target in raw_targets { + let t = target.trim(); + if t.is_empty() { + continue; + } + + if t.eq_ignore_ascii_case("all") || t == "{datasites[*]}" || t == "*" { + for p in participants { + resolved.insert(p.email.clone()); + } + continue; + } + + if t == "{datasite.current}" { + resolved.insert(my_email.to_string()); + continue; + } + + if t.contains('@') { + resolved.insert(t.to_string()); + continue; + } + + if t.starts_with("{datasites[") && t.ends_with("]}") { + let idx_str = &t["{datasites[".len()..t.len() - 2]; + if let Ok(idx) = idx_str.parse::() { + if let Some(email) = datasites_order.get(idx) { + resolved.insert(email.clone()); + } else if let Some(p) = 
participants.get(idx) { + resolved.insert(p.email.clone()); + } + } + continue; + } + + // {groups.aggregator} or {groups.clients} → resolve group name to emails + if t.starts_with("{groups.") && t.ends_with("}") { + let group_name = &t["{groups.".len()..t.len() - 1]; + if let Some(members) = groups.get(group_name) { + for email in members { + resolved.insert(email.clone()); + } + } + continue; + } + + if let Some(p) = participants.iter().find(|p| p.role == t) { + resolved.insert(p.email.clone()); + continue; + } + + let singular = t.trim_end_matches('s'); + for p in participants { + let role = p.role.as_str(); + let role_singular = role.trim_end_matches(|c: char| c.is_ascii_digit()); + if role == t + || role_singular == singular + || (t == "clients" + && (role.starts_with("client") || role.starts_with("contributor"))) + { + resolved.insert(p.email.clone()); + } + } + } + + resolved.into_iter().collect() +} + +/// Get targets as a list of group names/emails +/// Handles both original YAML structure (run.targets) and converted FlowSpec (runs_on) +fn get_step_targets(step: &serde_json::Value) -> Vec { + // Try converted FlowSpec structure first (runs_on) + if let Some(runs_on) = step.get("runs_on") { + match runs_on { + serde_json::Value::String(s) => return vec![s.clone()], + serde_json::Value::Array(arr) => { + return arr + .iter() + .filter_map(|v| v.as_str().map(|s| s.to_string())) + .collect(); + } + _ => {} + } + } + + // Fallback to original YAML structure (run.targets) + if let Some(run) = step.get("run") { + if let Some(targets) = run.get("targets") { + match targets { + serde_json::Value::String(s) => return vec![s.clone()], + serde_json::Value::Array(arr) => { + return arr + .iter() + .filter_map(|v| v.as_str().map(|s| s.to_string())) + .collect(); + } + _ => {} + } + } + } + + // Barrier steps + if let Some(barrier) = step.get("barrier") { + if let Some(targets) = barrier.get("targets") { + match targets { + serde_json::Value::String(s) => return 
vec![s.clone()], + serde_json::Value::Array(arr) => { + return arr + .iter() + .filter_map(|v| v.as_str().map(|s| s.to_string())) + .collect(); + } + _ => {} + } + } + } + + Vec::new() +} + +fn mapped_target_email( + target: &str, + default_to_actual: &HashMap, +) -> Option { + default_to_actual + .get(target) + .cloned() + .or_else(|| default_to_actual.get(&target.to_ascii_lowercase()).cloned()) +} + +fn collect_step_refs_from_value(value: &serde_json::Value, refs: &mut HashSet) { + match value { + serde_json::Value::String(text) => { + let mut offset = 0usize; + while let Some(found) = text[offset..].find("step.") { + let start = offset + found + 5; // skip `step.` + let remainder = &text[start..]; + let dep_id: String = remainder + .chars() + .take_while(|c| c.is_ascii_alphanumeric() || *c == '_' || *c == '-') + .collect(); + if !dep_id.is_empty() { + refs.insert(dep_id); + } + offset = start; + } + } + serde_json::Value::Array(items) => { + for item in items { + collect_step_refs_from_value(item, refs); + } + } + serde_json::Value::Object(map) => { + for v in map.values() { + collect_step_refs_from_value(v, refs); + } + } + _ => {} + } +} + +fn extract_with_step_dependencies( + step: &serde_json::Value, + known_step_ids: &HashSet, +) -> Vec { + let mut refs: HashSet = HashSet::new(); + if let Some(with_block) = step.get("with") { + collect_step_refs_from_value(with_block, &mut refs); + } + let mut deps: Vec = refs + .into_iter() + .filter(|id| known_step_ids.contains(id)) + .collect(); + deps.sort(); + deps +} + +fn parse_flow_steps( + flow_spec: &serde_json::Value, + my_email: &str, + participants: &[FlowParticipant], +) -> Result, String> { + let spec_root = flow_spec_root(flow_spec); + let steps = spec_root + .get("steps") + .and_then(|s| s.as_array()) + .ok_or_else(|| "Invalid flow spec: missing steps".to_string())?; + + // Build groups from participants (not from flow spec, which loses group info) + // Also get default-to-actual email mapping for resolved 
targets + let (groups, default_to_actual) = build_group_map_from_participants(participants, flow_spec); + println!( + "[Multiparty] parse_flow_steps: my_email={}, groups={:?}", + my_email, groups + ); + let known_step_ids: HashSet = steps + .iter() + .filter_map(|s| s.get("id").and_then(|v| v.as_str()).map(|s| s.to_string())) + .collect(); + let mut result = Vec::new(); + + for (step_index, step) in steps.iter().enumerate() { + let id = step + .get("id") + .and_then(|v| v.as_str()) + .unwrap_or("unknown") + .to_string(); + + let name = step + .get("name") + .and_then(|v| v.as_str()) + .unwrap_or(&id) + .to_string(); + + let description = step + .get("description") + .and_then(|v| v.as_str()) + .unwrap_or("") + .to_string(); + + let explicit_depends_on: Vec = step + .get("depends_on") + .and_then(|v| v.as_array()) + .map(|arr| { + arr.iter() + .filter_map(|v| v.as_str().map(|s| s.to_string())) + .collect() + }) + .unwrap_or_default(); + let inferred_depends_on = extract_with_step_dependencies(step, &known_step_ids); + let mut depends_set: HashSet = HashSet::new(); + for dep in explicit_depends_on + .into_iter() + .chain(inferred_depends_on.into_iter()) + { + if dep != id { + depends_set.insert(dep); + } + } + let mut depends_on: Vec = depends_set.into_iter().collect(); + + // Check if this is a barrier step + let is_barrier = step.get("barrier").is_some(); + let barrier_wait_for = step + .get("barrier") + .and_then(|b| b.get("wait_for")) + .and_then(|w| w.as_str()) + .map(|s| s.to_string()); + + // Some imported FlowSpec variants flatten `with` references and lose explicit + // dependency links. Keep UI sequencing stable by falling back to previous-step + // ordering when no dependencies are present. 
+ if depends_on.is_empty() && !is_barrier && step_index > 0 { + if let Some(prev_step_id) = steps + .get(step_index - 1) + .and_then(|s| s.get("id")) + .and_then(|v| v.as_str()) + { + if prev_step_id != id { + depends_on.push(prev_step_id.to_string()); + } + } + } + depends_on.sort(); + depends_on.dedup(); + + // Determine if my email is in the targets for this step + let targets = get_step_targets(step); + let my_action = if !targets.is_empty() { + // Check if my email is in the targets (handles both direct emails and group names) + targets.iter().any(|target| { + // Check if it's a direct email match + if target == my_email { + return true; + } + // Check if it's a group name and I'm in that group + if let Some(group_members) = groups.get(target) { + if group_members.contains(&my_email.to_string()) { + return true; + } + } + // Check if target is a default datasite email that maps to my email + // (handles case where runs_on was resolved to default emails) + if let Some(actual_email) = mapped_target_email(target, &default_to_actual) { + if actual_email == my_email { + return true; + } + } + false + }) + } else if is_barrier { + // Barrier applies to everyone - they all wait + true + } else { + false + }; + + // Check for share block (canonical schema) + let share_to = extract_share_to(step); + let shares_output = !share_to.is_empty() || step.get("share").is_some(); + + // Resolve targets to actual participant emails + let mut target_emails: Vec = targets + .iter() + .flat_map(|target| { + // Check if it's a group name + if let Some(group_members) = groups.get(target) { + group_members.clone() + } else if let Some(actual_email) = mapped_target_email(target, &default_to_actual) + { + // Target is a default datasite email, map to actual participant + vec![actual_email.clone()] + } else { + // It's a direct email or unknown - keep as is + vec![target.clone()] + } + }) + .collect(); + target_emails.sort(); + target_emails.dedup(); + + let module_ref = step + 
.get("uses") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()); + let module_path = module_ref.as_ref().and_then(|module_id| { + spec_root + .get("modules") + .and_then(|m| m.get(module_id)) + .and_then(|m| m.get("source")) + .and_then(|s| s.get("path")) + .and_then(|p| p.as_str()) + .map(|s| s.to_string()) + }); + let code_preview = serde_yaml::to_string(step).ok(); + + let with_bindings: HashMap = step + .get("with") + .and_then(|w| w.as_object()) + .map(|obj| obj.iter().map(|(k, v)| (k.clone(), v.clone())).collect()) + .unwrap_or_default(); + + // Determine initial status + let initial_status = if is_barrier { + // Barrier steps start as WaitingForInputs + StepStatus::WaitingForInputs + } else if depends_on.is_empty() && my_action { + StepStatus::Ready + } else { + StepStatus::Pending + }; + + result.push(StepState { + id, + name, + description, + auto_run: false, + status: initial_status, + my_action, + shares_output, + share_to, + depends_on, + output_dir: None, + outputs_shared: false, + targets, + target_emails, + is_barrier, + barrier_wait_for, + code_preview, + module_ref, + module_path, + with_bindings, + input_waiting_on: Vec::new(), + input_waiting_reason: None, + }); + } + + Ok(result) +} + +fn publish_step_outputs_message( + session_id: &str, + step_id: &str, + output_dir: &PathBuf, + thread_id: &str, + flow_name: &str, + my_email: &str, + step_name: &str, + participants: &[FlowParticipant], + send_message: bool, +) -> Result { + use base64::{engine::general_purpose::STANDARD, Engine}; + + // Read output files and encode as base64 + let mut results_data: Vec = vec![]; + if output_dir.exists() { + for entry in fs::read_dir(output_dir).map_err(|e| e.to_string())? 
{ + let entry = entry.map_err(|e| e.to_string())?; + let path = entry.path(); + if path.is_file() { + let file_name = path + .file_name() + .map(|n| n.to_string_lossy().to_string()) + .unwrap_or_default(); + + // Skip syft.pub.yaml + if file_name == "syft.pub.yaml" { + continue; + } + + let content = fs::read(&path) + .map_err(|e| format!("Failed to read file {}: {}", file_name, e))?; + + let base64_content = STANDARD.encode(&content); + + let is_text = file_name.ends_with(".csv") + || file_name.ends_with(".tsv") + || file_name.ends_with(".txt") + || file_name.ends_with(".json") + || file_name.ends_with(".yaml") + || file_name.ends_with(".yml"); + + results_data.push(serde_json::json!({ + "file_name": file_name, + "content_base64": base64_content, + "size_bytes": content.len(), + "is_text": is_text, + })); + } + } + } + + if results_data.is_empty() { + return Err("No output files to share".to_string()); + } + + // Get all participant emails except self for recipients + let recipients: Vec = participants + .iter() + .filter(|p| p.email != my_email) + .map(|p| p.email.clone()) + .collect(); + let mut group_participants: Vec = + participants.iter().map(|p| p.email.clone()).collect(); + if !group_participants.iter().any(|e| e == my_email) { + group_participants.push(my_email.to_string()); + } + group_participants.sort(); + group_participants.dedup(); + + // Create message body + let body = format!( + "📊 Results from step '{}' are ready!\n\n{} file(s) attached. 
Click to download.", + step_name, + results_data.len() + ); + let results_manifest: Vec = results_data + .iter() + .map(|entry| { + serde_json::json!({ + "file_name": entry.get("file_name").cloned().unwrap_or(serde_json::Value::Null), + "size_bytes": entry.get("size_bytes").cloned().unwrap_or(serde_json::Value::Null), + "is_text": entry.get("is_text").cloned().unwrap_or(serde_json::Value::Bool(false)), + }) + }) + .collect(); + + if !send_message || thread_id.trim().is_empty() || recipients.is_empty() { + return Ok(serde_json::json!({ + "success": true, + "files_shared": results_data.len(), + "recipients": recipients, + })); + } + + // Load config and message system + let config = + biovault::config::Config::load().map_err(|e| format!("Failed to load config: {}", e))?; + + let (db, sync) = biovault::cli::commands::messages::init_message_system(&config) + .map_err(|e| format!("Failed to init message system: {}", e))?; + + // Send to each recipient (or to the thread if group chat) + for recipient in &recipients { + let mut msg = biovault::messages::models::Message::new( + my_email.to_string(), + recipient.clone(), + body.clone(), + ); + + msg.subject = Some(format!("Flow Results: {} - {}", flow_name, step_name)); + msg.thread_id = Some(thread_id.to_string()); + + msg.metadata = Some(serde_json::json!({ + "group_chat": { + "participants": group_participants, + "is_group": true + }, + "flow_results": { + "flow_name": flow_name, + "session_id": session_id, + "step_id": step_id, + "step_name": step_name, + "sender": my_email, + "files": results_manifest, + } + })); + + db.insert_message(&msg) + .map_err(|e| format!("Failed to store message: {}", e))?; + + // Try to sync/send via RPC + let _ = sync.send_message(&msg.id); + } + + Ok(serde_json::json!({ + "success": true, + "files_shared": results_data.len(), + "recipients": recipients, + })) +} + +/// Share step outputs to the chat thread so all participants can see and download +#[tauri::command] +pub async fn 
share_step_outputs_to_chat( + state: tauri::State<'_, AppState>, + session_id: String, + step_id: String, +) -> Result { + // If the step has not been shared yet, share it first. + // `share_step_outputs` already posts one results message when a thread is present. + let should_share_first = { + let sessions = FLOW_SESSIONS.lock().map_err(|e| e.to_string())?; + let flow_state = sessions + .get(&session_id) + .ok_or_else(|| "Flow session not found".to_string())?; + let step = flow_state + .steps + .iter() + .find(|s| s.id == step_id) + .ok_or_else(|| "Step not found".to_string())?; + match step.status { + StepStatus::Completed => true, + StepStatus::Shared => false, + _ => { + return Err("Step must be completed/shared before posting to chat".to_string()); + } + } + }; + + if should_share_first { + share_step_outputs(state.clone(), session_id.clone(), step_id.clone()).await?; + } + + let (output_dir, thread_id, flow_name, my_email, step_name, participants) = { + let sessions = FLOW_SESSIONS.lock().map_err(|e| e.to_string())?; + let flow_state = sessions + .get(&session_id) + .ok_or_else(|| "Flow session not found".to_string())?; + let step = flow_state + .steps + .iter() + .find(|s| s.id == step_id) + .ok_or_else(|| "Step not found".to_string())?; + ( + step.output_dir.clone(), + flow_state.thread_id.clone(), + flow_state.flow_name.clone(), + flow_state.my_email.clone(), + step.name.clone(), + flow_state.participants.clone(), + ) + }; + let output_dir = output_dir.ok_or_else(|| "No output directory".to_string())?; + + // If we just shared, do not duplicate the chat message. 
+ publish_step_outputs_message( + &session_id, + &step_id, + &output_dir, + &thread_id, + &flow_name, + &my_email, + &step_name, + &participants, + !should_share_first, + ) +} diff --git a/src-tauri/src/commands/sessions.rs b/src-tauri/src/commands/sessions.rs index efa27c20..c6476e24 100644 --- a/src-tauri/src/commands/sessions.rs +++ b/src-tauri/src/commands/sessions.rs @@ -136,6 +136,7 @@ fn send_session_invite_message( if let Err(e) = messages::send_message(MessageSendRequest { to: Some(peer_email.to_string()), + recipients: None, body, subject: Some(subject), reply_to: None, @@ -179,6 +180,7 @@ fn send_session_invite_response_message( if let Err(e) = messages::send_message(MessageSendRequest { to: Some(requester.to_string()), + recipients: None, body, subject: Some(subject), reply_to, @@ -1611,6 +1613,7 @@ pub fn send_session_chat_message(session_id: String, body: String) -> Result Result<(), String> { crate::desktop_log!("⚠️ Failed to register profile for {}: {}", email, err); } + // Re-assert onboarding email after dependency/profile side effects. + // Some dependency save paths can rewrite config while onboarding is in-flight. 
+ let final_config_path = biovault_path.join("config.yaml"); + match biovault::config::Config::load() { + Ok(mut cfg) => { + if cfg.email.trim() != email.trim() { + cfg.email = email.clone(); + if let Err(err) = cfg.save(&final_config_path) { + crate::desktop_log!( + "⚠️ Final onboarding email persist failed for {}: {}", + email, + err + ); + } else { + crate::desktop_log!("✅ Final onboarding email persisted for {}", email); + } + } + } + Err(err) => { + crate::desktop_log!( + "⚠️ Failed final onboarding config load for {}: {}", + email, + err + ); + } + } + Ok(()) } @@ -468,12 +495,6 @@ pub fn get_settings() -> Result { let biovault_home = biovault::config::get_biovault_home() .map_err(|e| format!("Failed to get BioVault home: {}", e))?; let settings_path = biovault_home.join("database").join("settings.json"); - let legacy_settings_path = dirs::desktop_dir() - .or_else(|| dirs::home_dir().map(|h| h.join("Desktop"))) - .unwrap_or_else(|| PathBuf::from(".")) - .join("BioVault") - .join("database") - .join("settings.json"); println!( "⚙️ [get_settings] settings_path: {}", settings_path.display() @@ -484,11 +505,6 @@ pub fn get_settings() -> Result { let content = fs::read_to_string(&settings_path) .map_err(|e| format!("Failed to read settings: {}", e))?; serde_json::from_str(&content).map_err(|e| format!("Failed to parse settings: {}", e))? - } else if legacy_settings_path.exists() { - // Back-compat migration from legacy Desktop/BioVault location. 
- let content = fs::read_to_string(&legacy_settings_path) - .map_err(|e| format!("Failed to read legacy settings: {}", e))?; - serde_json::from_str(&content).unwrap_or_default() } else { println!("⚙️ [get_settings] settings.json does NOT exist, using defaults"); Settings::default() diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index 462f630a..82e88dd8 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ -10,7 +10,7 @@ use tauri::{ menu::{CheckMenuItemBuilder, MenuBuilder, MenuItemBuilder}, path::BaseDirectory, tray::TrayIconBuilder, - AppHandle, Emitter, Manager, + AppHandle, Emitter, Manager, WebviewUrl, }; use tauri_plugin_dialog::{DialogExt, MessageDialogButtons, MessageDialogKind}; @@ -560,6 +560,80 @@ fn expose_bundled_binaries(app: &tauri::App) { } } } + + // Expose bundled syqure as SEQURE_NATIVE_BIN (native runner). + // Keep parity with Windows behavior: respect an explicit valid override, else use + // bundled/dev candidates. + let mut allow_syqure_override = true; + if let Ok(existing) = std::env::var("SEQURE_NATIVE_BIN") { + let existing = existing.trim().to_string(); + if !existing.is_empty() { + let existing_path = std::path::PathBuf::from(&existing); + if existing_path.exists() { + crate::desktop_log!("🔧 Using pre-set SEQURE_NATIVE_BIN: {}", existing); + allow_syqure_override = false; + } else { + crate::desktop_log!( + "⚠️ SEQURE_NATIVE_BIN was set to a missing path ({}); falling back to bundled candidates", + existing_path.display() + ); + } + } + } + + if allow_syqure_override { + let syqure_candidates = [ + "syqure/syqure".to_string(), + "resources/syqure/syqure".to_string(), + ]; + let mut syqure_path = syqure_candidates + .iter() + .find_map(|path| app.path().resolve(path, BaseDirectory::Resource).ok()) + .filter(|p| p.exists()); + + if syqure_path.is_none() { + if let Ok(cwd) = std::env::current_dir() { + let dev_paths = [ + cwd.join("src-tauri") + .join("resources") + .join("syqure") + .join("syqure"), + 
cwd.join("resources").join("syqure").join("syqure"), + std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("resources") + .join("syqure") + .join("syqure"), + cwd.join("syqure") + .join("target") + .join("release") + .join("syqure"), + cwd.join("..") + .join("syqure") + .join("target") + .join("release") + .join("syqure"), + std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("..") + .join("syqure") + .join("target") + .join("release") + .join("syqure"), + ]; + for p in dev_paths { + if p.exists() { + syqure_path = Some(p); + break; + } + } + } + } + + if let Some(p) = syqure_path.filter(|p| p.exists()) { + let s = p.to_string_lossy().to_string(); + std::env::set_var("SEQURE_NATIVE_BIN", &s); + crate::desktop_log!("🔧 Using bundled SEQURE_NATIVE_BIN: {}", s); + } + } } #[cfg(target_os = "windows")] @@ -1027,8 +1101,14 @@ pub fn run() { load_biovault_email(&Some(biovault_home_dir.clone())) }; - // Build window title - include debug info if BIOVAULT_DEBUG_BANNER is set - let window_title = if std::env::var("BIOVAULT_DEBUG_BANNER") + // Build window title - allow explicit override, debug banner, or default + let window_title = if let Ok(custom) = std::env::var("BIOVAULT_WINDOW_TITLE") { + if custom.trim().is_empty() { + format!("BioVault - {}", email) + } else { + custom + } + } else if std::env::var("BIOVAULT_DEBUG_BANNER") .map(|v| matches!(v.to_lowercase().as_str(), "1" | "true" | "yes")) .unwrap_or(false) { @@ -1037,6 +1117,15 @@ pub fn run() { format!("BioVault - {}", email) }; + // Isolate WebView data (localStorage, cookies) per instance using biovault_home_dir. + // Without this, multiple Tauri instances with the same bundle identifier share + // a single WKWebView data store on macOS, causing identity/state cross-contamination. 
+ let webview_data_dir = if !profile_picker_mode && !biovault_home_dir.as_os_str().is_empty() { + Some(biovault_home_dir.join("webview-data")) + } else { + None + }; + let (conn, queue_processor_paused) = if profile_picker_mode { ( Connection::open_in_memory().expect("Could not open in-memory desktop database"), @@ -1274,9 +1363,28 @@ pub fn run() { }); } - if let Some(window) = app.get_webview_window("main") { - let _ = window.set_title(&window_title); + // Create the main window programmatically so we can set data_directory + // for per-instance WebView data isolation (localStorage, cookies, etc.). + let mut wb = tauri::WebviewWindowBuilder::new( + app, + "main", + WebviewUrl::App(Default::default()), + ) + .title(&window_title) + .inner_size(1100.0, 700.0) + .min_inner_size(900.0, 600.0); + + if let Some(ref data_dir) = webview_data_dir { + crate::desktop_log!( + "🔒 WebView data directory: {}", + data_dir.display() + ); + wb = wb.data_directory(data_dir.clone()); + } + let window = wb.build()?; + + { // Handle window close event - minimize to tray instead of quitting let window_clone = window.clone(); let app_handle = app.handle().clone(); @@ -1535,6 +1643,7 @@ pub fn run() { get_flows, get_runs_base_dir, create_flow, + import_flow_from_json, load_flow_editor, save_flow_editor, delete_flow, @@ -1694,6 +1803,21 @@ pub fn run() { add_dataset_to_session, remove_dataset_from_session, list_session_datasets, + // Multiparty flow commands + commands::multiparty::send_flow_invitation, + commands::multiparty::accept_flow_invitation, + commands::multiparty::get_multiparty_flow_state, + commands::multiparty::get_all_participant_progress, + commands::multiparty::get_multiparty_participant_datasite_path, + commands::multiparty::get_participant_logs, + commands::multiparty::get_multiparty_step_diagnostics, + commands::multiparty::set_step_auto_run, + commands::multiparty::run_flow_step, + commands::multiparty::share_step_outputs, + 
commands::multiparty::share_step_outputs_to_chat, + commands::multiparty::get_step_output_files, + commands::multiparty::get_multiparty_step_logs, + commands::multiparty::receive_flow_step_outputs, ]) .build(tauri::generate_context!()) .expect("error while building tauri application"); diff --git a/src-tauri/src/logging.rs b/src-tauri/src/logging.rs index e452977c..e6e27d31 100644 --- a/src-tauri/src/logging.rs +++ b/src-tauri/src/logging.rs @@ -100,8 +100,24 @@ fn write_log_line(level: LogLevel, message: &str) -> io::Result<()> { Ok(()) } +const NOISY_PATTERNS: &[&str] = &[ + "sync actions:", + "scan_remote: server returned", + "sync reconcile start:", + "files, ignored=", + "SyftBox queue poll", + "GET http://127.0.0.1:7938/v1/sync/status", +]; + +fn is_noisy_log(message: &str) -> bool { + NOISY_PATTERNS.iter().any(|pat| message.contains(pat)) +} + /// Append a timestamped log entry to the desktop log file. pub fn log_desktop_event(level: LogLevel, message: &str) { + if is_noisy_log(message) { + return; + } let _ = write_log_line(level, message); } diff --git a/src-tauri/src/types/mod.rs b/src-tauri/src/types/mod.rs index bd8b0fe7..37420a75 100644 --- a/src-tauri/src/types/mod.rs +++ b/src-tauri/src/types/mod.rs @@ -226,6 +226,9 @@ pub struct MessageSyncResult { #[derive(Deserialize)] pub struct MessageSendRequest { pub to: Option, + /// Multiple recipients for group messages (if set, takes precedence over `to`) + #[serde(default)] + pub recipients: Option>, pub body: String, pub subject: Option, pub reply_to: Option, diff --git a/src-tauri/src/ws_bridge.rs b/src-tauri/src/ws_bridge.rs index 58513778..5f24779a 100644 --- a/src-tauri/src/ws_bridge.rs +++ b/src-tauri/src/ws_bridge.rs @@ -364,6 +364,7 @@ fn get_commands_list() -> serde_json::Value { cmd_async("import_flow", "flows", false), cmd_async("import_flow_from_message", "flows", false), cmd_async("import_flow_from_request", "flows", false), + cmd_async("import_flow_from_json", "flows", false), 
cmd_long("import_flow_with_deps", "flows", false), cmd_long("run_flow", "flows", false), cmd_async("get_flow_runs", "flows", true), @@ -3033,6 +3034,19 @@ async fn execute_command(app: &AppHandle, cmd: &str, args: Value) -> Result { + let request: crate::commands::flows::ImportFlowFromJsonRequest = + serde_json::from_value( + args.get("request") + .cloned() + .ok_or_else(|| "Missing request".to_string())?, + ) + .map_err(|e| format!("Failed to parse request: {}", e))?; + let result = crate::commands::flows::import_flow_from_json(state.clone(), request) + .await + .map_err(|e| e.to_string())?; + Ok(serde_json::to_value(result).unwrap()) + } "run_flow" => { // Try to get the main window for event emission (optional in WS bridge mode) let window = app.get_webview_window("main"); @@ -3853,6 +3867,277 @@ async fn execute_command(app: &AppHandle, cmd: &str, args: Value) -> Result { + let thread_id: String = serde_json::from_value( + args.get("threadId") + .cloned() + .ok_or_else(|| "Missing threadId".to_string())?, + ) + .map_err(|e| format!("Failed to parse threadId: {}", e))?; + let flow_name: String = serde_json::from_value( + args.get("flowName") + .cloned() + .ok_or_else(|| "Missing flowName".to_string())?, + ) + .map_err(|e| format!("Failed to parse flowName: {}", e))?; + let flow_spec: serde_json::Value = args + .get("flowSpec") + .cloned() + .ok_or_else(|| "Missing flowSpec".to_string())?; + let participant_roles: Vec = + serde_json::from_value( + args.get("participantRoles") + .cloned() + .ok_or_else(|| "Missing participantRoles".to_string())?, + ) + .map_err(|e| format!("Failed to parse participantRoles: {}", e))?; + let result = crate::commands::multiparty::send_flow_invitation( + state.clone(), + thread_id, + flow_name, + flow_spec, + participant_roles, + ) + .await + .map_err(|e| e.to_string())?; + Ok(serde_json::to_value(result).unwrap()) + } + "accept_flow_invitation" => { + let session_id: String = serde_json::from_value( + args.get("sessionId") + 
.cloned() + .ok_or_else(|| "Missing sessionId".to_string())?, + ) + .map_err(|e| format!("Failed to parse sessionId: {}", e))?; + let flow_name: String = serde_json::from_value( + args.get("flowName") + .cloned() + .ok_or_else(|| "Missing flowName".to_string())?, + ) + .map_err(|e| format!("Failed to parse flowName: {}", e))?; + let flow_spec: serde_json::Value = args + .get("flowSpec") + .cloned() + .ok_or_else(|| "Missing flowSpec".to_string())?; + let participants: Vec = + serde_json::from_value( + args.get("participants") + .cloned() + .ok_or_else(|| "Missing participants".to_string())?, + ) + .map_err(|e| format!("Failed to parse participants: {}", e))?; + let auto_run_all: bool = args + .get("autoRunAll") + .and_then(|v| serde_json::from_value(v.clone()).ok()) + .unwrap_or(false); + let thread_id: Option = args + .get("threadId") + .and_then(|v| serde_json::from_value(v.clone()).ok()); + let input_overrides: Option> = args + .get("inputOverrides") + .map(|value| serde_json::from_value(value.clone())) + .transpose() + .map_err(|e| format!("Failed to parse inputOverrides: {}", e))?; + let result = crate::commands::multiparty::accept_flow_invitation( + state.clone(), + session_id, + flow_name, + flow_spec, + participants, + auto_run_all, + thread_id, + input_overrides, + ) + .await + .map_err(|e| e.to_string())?; + Ok(serde_json::to_value(result).unwrap()) + } + "get_multiparty_flow_state" => { + let session_id: String = serde_json::from_value( + args.get("sessionId") + .cloned() + .ok_or_else(|| "Missing sessionId".to_string())?, + ) + .map_err(|e| format!("Failed to parse sessionId: {}", e))?; + let result = + crate::commands::multiparty::get_multiparty_flow_state(state.clone(), session_id) + .await + .map_err(|e| e.to_string())?; + Ok(serde_json::to_value(result).unwrap()) + } + "get_all_participant_progress" => { + let session_id: String = serde_json::from_value( + args.get("sessionId") + .cloned() + .ok_or_else(|| "Missing sessionId".to_string())?, + ) + 
.map_err(|e| format!("Failed to parse sessionId: {}", e))?; + let result = crate::commands::multiparty::get_all_participant_progress(session_id) + .await + .map_err(|e| e.to_string())?; + Ok(serde_json::to_value(result).unwrap()) + } + "get_participant_logs" => { + let session_id: String = serde_json::from_value( + args.get("sessionId") + .cloned() + .ok_or_else(|| "Missing sessionId".to_string())?, + ) + .map_err(|e| format!("Failed to parse sessionId: {}", e))?; + let result = crate::commands::multiparty::get_participant_logs(session_id) + .await + .map_err(|e| e.to_string())?; + Ok(serde_json::to_value(result).unwrap()) + } + "get_multiparty_step_diagnostics" => { + let session_id: String = serde_json::from_value( + args.get("sessionId") + .cloned() + .ok_or_else(|| "Missing sessionId".to_string())?, + ) + .map_err(|e| format!("Failed to parse sessionId: {}", e))?; + let step_id: String = serde_json::from_value( + args.get("stepId") + .cloned() + .ok_or_else(|| "Missing stepId".to_string())?, + ) + .map_err(|e| format!("Failed to parse stepId: {}", e))?; + let result = + crate::commands::multiparty::get_multiparty_step_diagnostics(session_id, step_id) + .await + .map_err(|e| e.to_string())?; + Ok(serde_json::to_value(result).unwrap()) + } + "get_multiparty_step_logs" => { + let session_id: String = serde_json::from_value( + args.get("sessionId") + .cloned() + .ok_or_else(|| "Missing sessionId".to_string())?, + ) + .map_err(|e| format!("Failed to parse sessionId: {}", e))?; + let step_id: String = serde_json::from_value( + args.get("stepId") + .cloned() + .ok_or_else(|| "Missing stepId".to_string())?, + ) + .map_err(|e| format!("Failed to parse stepId: {}", e))?; + let lines: Option = args + .get("lines") + .cloned() + .and_then(|v| serde_json::from_value(v).ok()); + let result = crate::commands::multiparty::get_multiparty_step_logs( + state.clone(), + session_id, + step_id, + lines, + ) + .await + .map_err(|e| e.to_string())?; + 
Ok(serde_json::to_value(result).unwrap()) + } + "set_step_auto_run" => { + let session_id: String = serde_json::from_value( + args.get("sessionId") + .cloned() + .ok_or_else(|| "Missing sessionId".to_string())?, + ) + .map_err(|e| format!("Failed to parse sessionId: {}", e))?; + let step_id: String = serde_json::from_value( + args.get("stepId") + .cloned() + .ok_or_else(|| "Missing stepId".to_string())?, + ) + .map_err(|e| format!("Failed to parse stepId: {}", e))?; + let auto_run: bool = serde_json::from_value( + args.get("autoRun") + .cloned() + .ok_or_else(|| "Missing autoRun".to_string())?, + ) + .map_err(|e| format!("Failed to parse autoRun: {}", e))?; + crate::commands::multiparty::set_step_auto_run(session_id, step_id, auto_run) + .await + .map_err(|e| e.to_string())?; + Ok(serde_json::Value::Null) + } + "run_flow_step" => { + let session_id: String = serde_json::from_value( + args.get("sessionId") + .cloned() + .ok_or_else(|| "Missing sessionId".to_string())?, + ) + .map_err(|e| format!("Failed to parse sessionId: {}", e))?; + let step_id: String = serde_json::from_value( + args.get("stepId") + .cloned() + .ok_or_else(|| "Missing stepId".to_string())?, + ) + .map_err(|e| format!("Failed to parse stepId: {}", e))?; + let result = + crate::commands::multiparty::run_flow_step(state.clone(), session_id, step_id) + .await + .map_err(|e| e.to_string())?; + Ok(serde_json::to_value(result).unwrap()) + } + "share_step_outputs" => { + let session_id: String = serde_json::from_value( + args.get("sessionId") + .cloned() + .ok_or_else(|| "Missing sessionId".to_string())?, + ) + .map_err(|e| format!("Failed to parse sessionId: {}", e))?; + let step_id: String = serde_json::from_value( + args.get("stepId") + .cloned() + .ok_or_else(|| "Missing stepId".to_string())?, + ) + .map_err(|e| format!("Failed to parse stepId: {}", e))?; + crate::commands::multiparty::share_step_outputs(state.clone(), session_id, step_id) + .await + .map_err(|e| e.to_string())?; + 
Ok(serde_json::Value::Null) + } + "share_step_outputs_to_chat" => { + let session_id: String = serde_json::from_value( + args.get("sessionId") + .cloned() + .ok_or_else(|| "Missing sessionId".to_string())?, + ) + .map_err(|e| format!("Failed to parse sessionId: {}", e))?; + let step_id: String = serde_json::from_value( + args.get("stepId") + .cloned() + .ok_or_else(|| "Missing stepId".to_string())?, + ) + .map_err(|e| format!("Failed to parse stepId: {}", e))?; + let result = crate::commands::multiparty::share_step_outputs_to_chat( + state.clone(), + session_id, + step_id, + ) + .await + .map_err(|e| e.to_string())?; + Ok(serde_json::to_value(result).unwrap()) + } + "get_step_output_files" => { + let session_id: String = serde_json::from_value( + args.get("sessionId") + .cloned() + .ok_or_else(|| "Missing sessionId".to_string())?, + ) + .map_err(|e| format!("Failed to parse sessionId: {}", e))?; + let step_id: String = serde_json::from_value( + args.get("stepId") + .cloned() + .ok_or_else(|| "Missing stepId".to_string())?, + ) + .map_err(|e| format!("Failed to parse stepId: {}", e))?; + let result = crate::commands::multiparty::get_step_output_files(session_id, step_id) + .await + .map_err(|e| e.to_string())?; + Ok(serde_json::to_value(result).unwrap()) + } + _ => { crate::desktop_log!("⚠️ Unhandled command: {}", cmd); Err(format!("Unhandled command: {}", cmd)) diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json index 0105d0d0..f7d9343a 100644 --- a/src-tauri/tauri.conf.json +++ b/src-tauri/tauri.conf.json @@ -8,16 +8,7 @@ }, "app": { "withGlobalTauri": true, - "windows": [ - { - "title": "BioVault", - "width": 1100, - "height": 700, - "minWidth": 900, - "minHeight": 600, - "devtools": true - } - ], + "windows": [], "security": { "csp": null, diff --git a/src/css/messages.css b/src/css/messages.css index 96b4e55b..3187b735 100644 --- a/src/css/messages.css +++ b/src/css/messages.css @@ -489,6 +489,17 @@ text-transform: uppercase; } +/* Group Badge - 
Purple styling for group chats */ +.message-thread-group { + padding: 2px 6px; + background: #ede9fe; + color: #7c3aed; + border-radius: 4px; + font-size: 9px; + font-weight: 600; + text-transform: uppercase; +} + /* Session Badge - Prominent amber styling to match session threads */ .message-thread-session { display: inline-flex; @@ -1936,3 +1947,836 @@ white-space: pre-wrap; word-break: break-all; } + +/* ============================================================================ + MULTIPARTY FLOW MODAL + ============================================================================ */ + +.multiparty-modal { + position: fixed; + top: 0; + left: 0; + right: 0; + bottom: 0; + background: rgba(0, 0, 0, 0.5); + display: flex; + align-items: center; + justify-content: center; + z-index: 1000; +} + +.multiparty-modal-content { + background: #fff; + border-radius: 12px; + width: 90%; + max-width: 600px; + max-height: 80vh; + display: flex; + flex-direction: column; + box-shadow: 0 20px 60px rgba(0, 0, 0, 0.3); +} + +.multiparty-modal-header { + display: flex; + justify-content: space-between; + align-items: center; + padding: 16px 20px; + border-bottom: 1px solid #e2e8f0; +} + +.multiparty-modal-title { + font-size: 18px; + font-weight: 600; + color: #1e293b; + margin: 0; +} + +.multiparty-modal-close { + background: none; + border: none; + font-size: 24px; + color: #64748b; + cursor: pointer; + padding: 0; + line-height: 1; +} + +.multiparty-modal-close:hover { + color: #1e293b; +} + +.multiparty-modal-body { + flex: 1; + overflow-y: auto; + padding: 20px; +} + +.multiparty-section { + margin-bottom: 24px; +} + +.multiparty-section h3 { + font-size: 14px; + font-weight: 600; + color: #64748b; + text-transform: uppercase; + letter-spacing: 0.05em; + margin: 0 0 12px 0; +} + +.multiparty-participants-list { + display: flex; + flex-direction: column; + gap: 8px; +} + +.multiparty-participant { + display: flex; + align-items: center; + gap: 8px; + padding: 10px 12px; + 
background: #f8fafc; + border-radius: 8px; +} + +.participant-icon { + font-size: 18px; +} + +.participant-email { + font-size: 14px; + color: #1e293b; + font-weight: 500; +} + +.participant-role { + font-size: 12px; + color: #64748b; +} + +.multiparty-steps-list { + display: flex; + flex-direction: column; + gap: 12px; +} + +.multiparty-step { + padding: 16px; + border-radius: 8px; + border: 1px solid #e2e8f0; + transition: all 0.2s ease; +} + +.multiparty-step.my-step { + background: #f0f9ff; + border-color: #bae6fd; +} + +.multiparty-step.other-step { + background: #f8fafc; + opacity: 0.7; +} + +.step-header { + display: flex; + align-items: center; + gap: 12px; + margin-bottom: 8px; +} + +.step-auto-run { + display: flex; + align-items: center; + gap: 4px; + font-size: 12px; + color: #64748b; + cursor: pointer; +} + +.step-auto-run input { + cursor: pointer; +} + +.step-auto-run input:disabled { + cursor: not-allowed; + opacity: 0.5; +} + +.step-info { + flex: 1; + display: flex; + justify-content: space-between; + align-items: center; +} + +.step-name { + font-size: 14px; + font-weight: 600; + color: #1e293b; +} + +.step-status { + font-size: 12px; + padding: 4px 8px; + border-radius: 4px; + display: flex; + align-items: center; + gap: 4px; +} + +.step-status.status-pending { + background: #f1f5f9; + color: #64748b; +} + +.step-status.status-waiting { + background: #fef3c7; + color: #92400e; +} + +.step-status.status-ready { + background: #d1fae5; + color: #065f46; +} + +.step-status.status-running { + background: #dbeafe; + color: #1d4ed8; +} + +.step-status.status-completed { + background: #d1fae5; + color: #065f46; +} + +.step-status.status-sharing { + background: #e0e7ff; + color: #4338ca; +} + +.step-status.status-shared { + background: #c7d2fe; + color: #3730a3; +} + +.step-status.status-failed { + background: #fee2e2; + color: #991b1b; +} + +.step-description { + font-size: 13px; + color: #64748b; + margin-bottom: 12px; +} + +.step-actions { + display: 
flex; + gap: 8px; + flex-wrap: wrap; +} + +.step-btn { + padding: 6px 12px; + border-radius: 6px; + font-size: 12px; + font-weight: 500; + cursor: pointer; + border: none; + transition: all 0.2s ease; +} + +.step-btn.run-btn { + background: #3b82f6; + color: #fff; +} + +.step-btn.run-btn:hover { + background: #2563eb; +} + +.step-btn.preview-btn { + background: #f1f5f9; + color: #1e293b; + border: 1px solid #e2e8f0; +} + +.step-btn.preview-btn:hover { + background: #e2e8f0; +} + +.step-btn.share-btn { + background: #10b981; + color: #fff; +} + +.step-btn.share-btn:hover { + background: #059669; +} + +.running-indicator { + color: #3b82f6; + font-size: 12px; + font-weight: 500; +} + +.waiting-text { + color: #64748b; + font-size: 12px; + font-style: italic; +} + +.completed-text { + color: #059669; + font-size: 12px; + font-weight: 500; +} + +.shared-text { + color: #4338ca; + font-size: 12px; + font-weight: 500; +} + +.multiparty-modal-footer { + padding: 16px 20px; + border-top: 1px solid #e2e8f0; + display: flex; + justify-content: flex-end; + gap: 12px; +} + +/* Flow Invitation Card in Messages */ +.flow-invitation-card { + background: linear-gradient(135deg, #ede9fe, #ddd6fe); + border: 1px solid #c4b5fd; + border-radius: 12px; + padding: 16px; + margin: 8px 0; +} + +.flow-invitation-header { + display: flex; + align-items: center; + gap: 8px; + margin-bottom: 12px; +} + +.flow-invitation-icon { + font-size: 24px; +} + +.flow-invitation-title { + font-size: 16px; + font-weight: 600; + color: #4c1d95; +} + +.flow-invitation-participants { + font-size: 13px; + color: #6b21a8; + margin-bottom: 12px; +} + +.flow-invitation-actions { + display: flex; + gap: 8px; +} + +.flow-invitation-btn { + padding: 8px 16px; + border-radius: 6px; + font-size: 13px; + font-weight: 500; + cursor: pointer; + border: none; + transition: all 0.2s ease; +} + +.flow-invitation-btn.join-btn { + background: #7c3aed; + color: #fff; +} + +.flow-invitation-btn.join-btn:hover { + background: 
#6d28d9; +} + +.flow-invitation-btn.decline-btn { + background: #fff; + color: #6b21a8; + border: 1px solid #c4b5fd; +} + +.flow-invitation-btn.decline-btn:hover { + background: #f5f3ff; +} + +/* Propose Flow Button */ +.msg-propose-flow-btn { + width: 36px; + height: 36px; + border-radius: 8px; + border: 1px solid #e2e8f0; + background: #f8fafc; + color: #8b5cf6; + cursor: pointer; + display: flex; + align-items: center; + justify-content: center; + flex-shrink: 0; + transition: all 0.15s ease; +} + +.msg-propose-flow-btn:hover { + background: #ede9fe; + border-color: #c4b5fd; +} + +/* Propose Flow Modal Styles */ +.propose-flow-select { + width: 100%; + padding: 10px 12px; + border: 1px solid #e2e8f0; + border-radius: 8px; + font-size: 14px; + color: #1e293b; + background: #fff; + cursor: pointer; +} + +.propose-flow-select:focus { + outline: none; + border-color: #8b5cf6; + box-shadow: 0 0 0 3px rgba(139, 92, 246, 0.1); +} + +.propose-flow-roles-list { + display: flex; + flex-direction: column; + gap: 12px; +} + +.propose-flow-role-row { + display: flex; + align-items: center; + gap: 12px; + padding: 10px 12px; + background: #f8fafc; + border-radius: 8px; + border: 1px solid #e2e8f0; +} + +.propose-flow-role-label { + flex: 0 0 120px; + font-weight: 600; + color: #475569; + font-size: 13px; +} + +.propose-flow-role-arrow { + color: #94a3b8; +} + +.propose-flow-role-select { + flex: 1; + padding: 8px 10px; + border: 1px solid #e2e8f0; + border-radius: 6px; + font-size: 13px; + color: #1e293b; + background: #fff; +} + +.propose-flow-role-select:focus { + outline: none; + border-color: #8b5cf6; +} + +.propose-flow-message { + width: 100%; + min-height: 60px; + padding: 10px 12px; + border: 1px solid #e2e8f0; + border-radius: 8px; + font-size: 14px; + color: #1e293b; + resize: vertical; + font-family: inherit; +} + +.propose-flow-message:focus { + outline: none; + border-color: #8b5cf6; + box-shadow: 0 0 0 3px rgba(139, 92, 246, 0.1); +} + +/* Flow invitation card - 
View in Runs button */ +.flow-invitation-btn.view-runs-btn { + background: linear-gradient(135deg, #3b82f6 0%, #2563eb 100%); + color: #fff; + border: none; +} + +.flow-invitation-btn.view-runs-btn:hover { + background: linear-gradient(135deg, #2563eb 0%, #1d4ed8 100%); +} + +.flow-invitation-btn.import-btn { + background: linear-gradient(135deg, #10b981 0%, #059669 100%); + color: #fff; + border: none; +} + +.flow-invitation-btn.import-btn:hover { + background: linear-gradient(135deg, #059669 0%, #047857 100%); +} + +.flow-invitation-btn.import-btn:disabled { + background: #94a3b8; + cursor: not-allowed; +} + +.flow-invitation-status { + font-size: 12px; + color: #059669; + margin-top: 6px; + min-height: 16px; +} + +/* Flow invitation - Role display */ +.flow-invitation-role { + font-size: 13px; + color: #475569; + margin: 8px 0; + padding: 6px 10px; + background: #f0fdf4; + border: 1px solid #bbf7d0; + border-radius: 6px; +} + +.flow-invitation-role strong { + color: #166534; +} + +.flow-input-picker-modal { + position: fixed; + inset: 0; + z-index: 10050; + display: flex; + align-items: center; + justify-content: center; +} + +.flow-input-picker-backdrop { + position: absolute; + inset: 0; + background: rgba(15, 23, 42, 0.45); +} + +.flow-input-picker-content { + position: relative; + width: min(720px, 92vw); + max-height: 84vh; + display: flex; + flex-direction: column; + background: #ffffff; + border-radius: 12px; + border: 1px solid #e2e8f0; + box-shadow: 0 20px 50px rgba(15, 23, 42, 0.35); + overflow: hidden; +} + +.flow-input-picker-header { + padding: 16px 18px 12px; + border-bottom: 1px solid #e2e8f0; +} + +.flow-input-picker-header h3 { + margin: 0; + font-size: 17px; + color: #0f172a; +} + +.flow-input-picker-header p { + margin: 4px 0 0; + font-size: 12px; + color: #64748b; +} + +.flow-input-picker-body { + padding: 14px 18px; + overflow: auto; + display: flex; + flex-direction: column; + gap: 12px; +} + +.flow-input-picker-row { + padding: 10px 12px; 
+ background: #f8fafc; + border: 1px solid #e2e8f0; + border-radius: 10px; + display: flex; + flex-direction: column; + gap: 8px; +} + +.flow-input-picker-label { + font-size: 13px; + font-weight: 600; + color: #0f172a; +} + +.flow-input-picker-hint { + margin-top: 2px; + font-size: 11px; + font-weight: 500; + color: #64748b; +} + +.flow-input-picker-select, +.flow-input-picker-text { + width: 100%; + padding: 8px 10px; + border: 1px solid #cbd5e1; + border-radius: 8px; + font-size: 13px; + color: #1e293b; + background: #ffffff; +} + +.flow-input-picker-select { + min-height: 38px; +} + +.flow-input-picker-file-list { + max-height: 240px; + overflow: auto; + border: 1px solid #cbd5e1; + border-radius: 8px; + background: #ffffff; +} + +.flow-input-picker-controls { + display: flex; + gap: 8px; +} + +.flow-input-picker-search { + flex: 1; + padding: 8px 10px; + border: 1px solid #cbd5e1; + border-radius: 8px; + font-size: 13px; + color: #1e293b; +} + +.flow-input-picker-type-filter { + min-width: 140px; + padding: 8px 10px; + border: 1px solid #cbd5e1; + border-radius: 8px; + font-size: 13px; + color: #1e293b; + background: #fff; +} + +.flow-input-picker-select-all-row { + display: flex; + align-items: center; + gap: 8px; + font-size: 12px; + color: #334155; +} + +.flow-input-picker-table { + width: 100%; + border-collapse: collapse; + font-size: 12px; +} + +.flow-input-picker-table thead th { + position: sticky; + top: 0; + background: #f8fafc; + border-bottom: 1px solid #e2e8f0; + text-align: left; + padding: 8px 10px; + font-weight: 600; + color: #334155; +} + +.flow-input-picker-table thead th:first-child { + width: 36px; +} + +.flow-input-picker-file-row td { + border-bottom: 1px solid #eef2f7; + padding: 7px 10px; + color: #334155; +} + +.flow-input-picker-file-row td:nth-child(3) { + max-width: 320px; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.flow-input-picker-file-row:hover { + background: #f8fafc; +} + 
+.flow-input-picker-checkbox { + width: 16px; + height: 16px; +} + +.flow-input-picker-footer { + padding: 12px 18px 16px; + border-top: 1px solid #e2e8f0; + display: flex; + justify-content: flex-end; + gap: 10px; +} + +.flow-input-picker-cancel, +.flow-input-picker-confirm { + padding: 9px 14px; + border-radius: 8px; + font-size: 13px; + font-weight: 600; + cursor: pointer; +} + +.flow-input-picker-cancel { + background: #ffffff; + border: 1px solid #cbd5e1; + color: #334155; +} + +.flow-input-picker-confirm { + background: #2563eb; + border: 1px solid #1d4ed8; + color: #ffffff; +} + +/* Participant chip highlighting */ +.participant-chip.is-me { + background: #dbeafe; + border-color: #93c5fd; + font-weight: 600; +} + +/* ============================================================================ + EMAIL CHIP INPUT + ============================================================================ */ + +.chip-input-wrapper { + display: flex; + flex-wrap: wrap; + align-items: center; + gap: 6px; + min-height: 36px; + padding: 4px 8px; + background: #fff; + border: 1px solid #e2e8f0; + border-radius: 8px; + cursor: text; +} + +.chip-input-wrapper:focus-within { + border-color: #3b82f6; + box-shadow: 0 0 0 3px rgba(59, 130, 246, 0.1); +} + +.chips-container { + display: flex; + flex-wrap: wrap; + gap: 4px; +} + +.email-chip { + display: inline-flex; + align-items: center; + gap: 4px; + padding: 4px 8px; + background: #e0e7ff; + border: 1px solid #a5b4fc; + border-radius: 16px; + font-size: 13px; + color: #3730a3; + white-space: nowrap; +} + +.email-chip .chip-remove { + display: flex; + align-items: center; + justify-content: center; + width: 16px; + height: 16px; + padding: 0; + margin-left: 2px; + background: transparent; + border: none; + border-radius: 50%; + font-size: 14px; + font-weight: bold; + color: #6366f1; + cursor: pointer; + line-height: 1; +} + +.email-chip .chip-remove:hover { + background: #c7d2fe; + color: #4338ca; +} + +.chip-input-wrapper input { + 
flex: 1; + min-width: 120px; + padding: 4px; + border: none; + background: transparent; + font-size: 14px; + outline: none; +} + +.chip-input-wrapper input::placeholder { + color: #94a3b8; +} + +/* Override the default recipient input styling when inside chip wrapper */ +.msg-compose-recipient .chip-input-wrapper { + margin-top: 4px; +} + +.msg-compose-recipient .chip-input-wrapper input { + padding: 0; +} + +/* Participant chips in thread header */ +.participant-chip { + display: inline-block; + padding: 2px 8px; + margin: 0 3px; + font-size: 12px; + font-weight: 500; + border-radius: 12px; + border: 1px solid; + white-space: nowrap; +} + +.participant-label { + color: var(--text-secondary, #64748b); + font-size: 13px; + margin-right: 4px; +} + +#message-thread-participants { + display: flex; + flex-wrap: wrap; + align-items: center; + gap: 2px; +} diff --git a/src/css/runs.css b/src/css/runs.css index 6388fd4c..3b8c1fe5 100644 --- a/src/css/runs.css +++ b/src/css/runs.css @@ -885,3 +885,979 @@ margin: 0; line-height: 1.5; } + +/* ============================================================================ + MULTIPARTY FLOW STYLES + ============================================================================ */ + +.multiparty-details { + padding: 16px 20px; +} + +.mp-section { + margin-bottom: 16px; +} + +.mp-section:last-child { + margin-bottom: 0; +} + +.mp-section-title { + font-size: 13px; + font-weight: 600; + color: #374151; + margin-bottom: 10px; +} + +.mp-participants { + display: flex; + flex-wrap: wrap; + gap: 8px; +} + +.mp-participant { + display: flex; + flex-direction: column; + padding: 8px 12px; + background: #f8fafc; + border: 1px solid #e2e8f0; + border-radius: 8px; + font-size: 12px; +} + +.mp-participant.is-me { + background: #eff6ff; + border-color: #bfdbfe; +} + +.mp-participant-role { + font-weight: 600; + color: #1e40af; + margin-bottom: 2px; +} + +.mp-participant-email { + color: #64748b; +} + +.mp-inputs-section { + margin-top: 6px; +} + 
+.mp-inputs-toggle { + width: 100%; + display: flex; + align-items: center; + justify-content: space-between; + padding: 8px 10px; + background: #f8fafc; + border: 1px solid #e2e8f0; + border-radius: 8px; + font-size: 12px; + font-weight: 600; + color: #334155; + cursor: pointer; +} + +.mp-inputs-count { + background: #e2e8f0; + color: #475569; + border-radius: 999px; + padding: 2px 7px; + font-size: 11px; + font-weight: 700; +} + +.mp-inputs-body { + margin-top: 8px; + padding: 10px; + background: #ffffff; + border: 1px solid #e2e8f0; + border-radius: 8px; + display: flex; + flex-direction: column; + gap: 8px; +} + +.mp-input-row { + display: flex; + flex-direction: column; + gap: 3px; + padding-bottom: 8px; + border-bottom: 1px dashed #e2e8f0; +} + +.mp-input-row:last-child { + border-bottom: none; + padding-bottom: 0; +} + +.mp-input-key { + font-family: 'SF Mono', Monaco, monospace; + font-size: 11px; + color: #64748b; +} + +.mp-input-value { + font-family: 'SF Mono', Monaco, monospace; + font-size: 12px; + color: #0f172a; + word-break: break-all; +} + +.mp-steps-list { + display: flex; + flex-direction: column; + gap: 0; + padding: 8px 0; +} + +.mp-steps-loading, +.mp-no-steps, +.mp-error { + padding: 20px; + text-align: center; + color: #64748b; + font-size: 13px; +} + +.mp-error { + color: #dc2626; + background: #fef2f2; + border-radius: 6px; +} + +.mp-step { + padding: 0; + background: #f8fafc; + border: 1px solid #e2e8f0; + border-radius: 10px; + box-shadow: 0 1px 2px rgba(15, 23, 42, 0.04); + transition: all 0.2s; + overflow: hidden; + margin: 0 0 14px 0; +} + +.mp-step:last-child { + margin-bottom: 0; +} + +.mp-step.my-action { + background: #f0f9ff; + border-color: #bae6fd; +} + +.mp-step.other-action { + opacity: 0.7; +} + +.mp-step-header { + display: flex; + align-items: center; + gap: 8px; + width: 100%; + padding: 13px 14px; + background: transparent; + border: none; + cursor: pointer; + text-align: left; +} + +.mp-step-chevron { + font-size: 14px; 
+ color: #64748b; + min-width: 12px; +} + +.mp-step-status { + font-size: 14px; +} + +.mp-step-name { + font-weight: 600; + color: #1f2937; + font-size: 14px; + line-height: 1.2; +} + +.mp-step-timer { + margin-left: 8px; + font-size: 11px; + font-weight: 600; + color: #64748b; + white-space: nowrap; +} + +.mp-step-badge { + margin-left: auto; + padding: 2px 8px; + background: #dbeafe; + color: #1e40af; + font-size: 10px; + font-weight: 600; + border-radius: 10px; + text-transform: uppercase; +} + +.mp-step-desc { + font-size: 12px; + color: #64748b; + margin-bottom: 8px; +} + +.mp-step-body { + padding: 0 14px 12px 14px; +} + +.mp-step.collapsed .mp-step-body { + display: none; +} + +.mp-step-controls { + display: flex; + align-items: center; + gap: 8px; + flex-wrap: wrap; + margin-top: 8px; +} + +.flow-run-card.mp-run-complete { + border-color: #86efac !important; + box-shadow: + 0 0 0 1px rgba(34, 197, 94, 0.18), + 0 10px 26px rgba(34, 197, 94, 0.12) !important; +} + +.flow-run-card.mp-run-complete .run-header { + background: linear-gradient(135deg, #f0fdf4 0%, #dcfce7 100%) !important; + border-bottom-color: #bbf7d0 !important; +} + +.status-badge.mp-run-status-complete { + background: linear-gradient(135deg, #d1fae5 0%, #86efac 100%) !important; + color: #166534 !important; + border-color: rgba(22, 101, 52, 0.25) !important; +} + +.mp-auto-toggle { + display: flex; + align-items: center; + gap: 4px; + font-size: 11px; + color: #64748b; + cursor: pointer; +} + +.mp-auto-toggle input { + cursor: pointer; +} + +.mp-auto-toggle input:disabled { + cursor: not-allowed; + opacity: 0.5; +} + +.mp-btn { + padding: 4px 10px; + font-size: 12px; + font-weight: 500; + border: none; + border-radius: 4px; + cursor: pointer; + transition: all 0.2s; +} + +.mp-run-btn { + background: #3b82f6; + color: white; +} + +.mp-run-btn:hover { + background: #2563eb; +} + +.mp-preview-btn { + background: #f1f5f9; + color: #475569; + border: 1px solid #e2e8f0; +} + +.mp-preview-btn:hover { 
+ background: #e2e8f0; +} + +.mp-retry-btn { + background: #f59e0b; + color: white; +} + +.mp-retry-btn:hover { + background: #d97706; +} + +.mp-share-btn { + background: #10b981; + color: white; +} + +.mp-share-btn:hover { + background: #059669; +} + +.mp-chat-share-btn { + background: #eef2ff; + color: #3730a3; + border: 1px solid #c7d2fe; +} + +.mp-chat-share-btn:hover { + background: #e0e7ff; +} + +.mp-waiting, +.mp-running, +.mp-done, +.mp-shared { + font-size: 12px; + color: #64748b; +} + +.mp-running { + color: #3b82f6; +} + +.mp-done { + color: #059669; +} + +.mp-shared { + color: #059669; +} + +/* Step status styling */ +.mp-step.mp-step-pending { + border-left: 3px solid #94a3b8; +} +.mp-step.mp-step-waitingforinputs { + border-left: 3px solid #f59e0b; +} +.mp-step.mp-step-ready { + border-left: 3px solid #22c55e; +} +.mp-step.mp-step-running { + border-left: 3px solid #3b82f6; +} +.mp-step.mp-step-completed { + border-left: 3px solid #059669; + background: linear-gradient(135deg, #f0fdf4 0%, #ecfdf3 100%); + border-color: #86efac; +} +.mp-step.mp-step-sharing { + border-left: 3px solid #8b5cf6; +} +.mp-step.mp-step-shared { + border-left: 3px solid #16a34a; + background: linear-gradient(135deg, #f0fdf4 0%, #ecfdf3 100%); + border-color: #86efac; +} +.mp-step.mp-step-failed { + border-left: 3px solid #ef4444; +} + +/* Future/inactive steps: keep them visually subdued */ +.mp-step.mp-step-pending, +.mp-step.mp-step-waitingforinputs, +.mp-step.mp-step-ready { + background: #ffffff; + border-color: #e5e7eb; + box-shadow: none; +} + +.mp-step.mp-step-pending:not(.next-step), +.mp-step.mp-step-waitingforinputs:not(.next-step), +.mp-step.mp-step-ready:not(.next-step) { + opacity: 0.78; +} + +.mp-step.other-action.mp-step-completed, +.mp-step.other-action.mp-step-shared { + opacity: 1; +} + +/* Progress section */ +.mp-progress-section { + padding: 12px; + background: linear-gradient(135deg, #f0f9ff 0%, #e0f2fe 100%); + border-radius: 8px; + margin-bottom: 12px; 
+} + +.mp-progress-bar { + height: 8px; + background: #e2e8f0; + border-radius: 4px; + overflow: hidden; + margin-bottom: 8px; +} + +.mp-progress-fill { + height: 100%; + background: linear-gradient(90deg, #22c55e 0%, #16a34a 100%); + border-radius: 4px; + transition: width 0.3s ease; +} + +.mp-progress-text { + font-size: 13px; + color: #475569; + margin-bottom: 10px; +} + +.mp-progress-actions { + display: flex; + gap: 8px; + margin-bottom: 10px; +} + +.mp-collapse-btn { + background: #ffffff; + border: 1px solid #d1d5db; + color: #475569; +} + +.mp-collapse-btn:hover { + background: #f8fafc; +} + +.mp-waiting-banner { + margin-bottom: 10px; + padding: 10px 12px; + border-radius: 8px; + background: #eef2ff; + color: #3730a3; + font-size: 12px; + font-weight: 600; +} + +.mp-waiting-banner.is-you { + background: #fee2e2; + color: #991b1b; + border: 1px solid #fecaca; + font-size: 14px; +} + +.mp-waiting-banner.is-clear { + background: #dcfce7; + color: #166534; +} + +.mp-waiting-chip { + display: inline-block; + padding: 2px 8px; + border-radius: 999px; + background: rgba(255, 255, 255, 0.8); + margin-left: 6px; + font-size: 11px; + font-weight: 700; +} + +.mp-run-next-btn { + width: 100%; + padding: 10px 16px; + background: linear-gradient(135deg, #3b82f6 0%, #2563eb 100%); + color: white; + border: none; + border-radius: 6px; + font-weight: 600; + font-size: 14px; + cursor: pointer; + transition: all 0.2s; +} + +.mp-run-next-btn:hover { + background: linear-gradient(135deg, #2563eb 0%, #1d4ed8 100%); + transform: translateY(-1px); + box-shadow: 0 4px 12px rgba(37, 99, 235, 0.3); +} + +/* Next step highlight */ +.mp-step.next-step { + background: linear-gradient(135deg, #dbeafe 0%, #bfdbfe 100%); + border-left: 4px solid #2563eb; + border-color: #93c5fd; + box-shadow: + 0 0 0 1px rgba(37, 99, 235, 0.2), + 0 8px 20px rgba(37, 99, 235, 0.22); + opacity: 1; +} + +.mp-step.next-step .mp-step-name { + color: #1d4ed8; + font-weight: 700; +} + +.mp-step.next-step 
.mp-step-header { + background: rgba(255, 255, 255, 0.35); +} + +.mp-next-badge { + background: linear-gradient(135deg, #3b82f6 0%, #2563eb 100%) !important; + color: white !important; + animation: pulse-badge 2s infinite; +} + +@keyframes pulse-badge { + 0%, + 100% { + opacity: 1; + } + 50% { + opacity: 0.7; + } +} + +/* Participant progress chips */ +.mp-step-participants { + display: flex; + gap: 6px; + flex-wrap: wrap; + margin-top: 2px; + padding: 4px 0; +} + +.mp-participant-chip { + display: inline-flex; + align-items: center; + gap: 4px; + padding: 4px 10px; + border-radius: 12px; + font-size: 11px; + font-weight: 500; + background: #f3f4f6; + color: #6b7280; + border: 1px solid #e5e7eb; +} + +.mp-participant-chip.pending { + background: #fef3c7; + color: #92400e; + border-color: #fcd34d; +} + +.mp-participant-chip.completed { + background: #d1fae5; + color: #065f46; + border-color: #6ee7b7; +} + +.mp-participant-chip.shared { + background: #d1fae5; + color: #065f46; + border-color: #6ee7b7; +} + +.mp-participant-chip.not-involved { + background: #f9fafb; + color: #9ca3af; + border-color: #e5e7eb; + opacity: 0.6; +} + +.mp-net-panel { + margin-top: 8px; + padding: 10px 12px; + border-radius: 10px; + border: 1px solid #dbeafe; + background: linear-gradient(180deg, #f8fbff 0%, #f1f5f9 100%); +} + +.mp-net-header { + display: flex; + align-items: center; + justify-content: space-between; + gap: 10px; + margin-bottom: 8px; +} + +.mp-net-title { + font-size: 12px; + font-weight: 700; + letter-spacing: 0.02em; + color: #1e3a8a; + text-transform: uppercase; +} + +.mp-net-live { + display: inline-flex; + align-items: center; + gap: 6px; + padding: 2px 9px; + border-radius: 999px; + font-size: 11px; + font-weight: 700; + border: 1px solid transparent; +} + +.mp-net-live::before { + content: ''; + width: 7px; + height: 7px; + border-radius: 999px; + background: currentColor; +} + +.mp-net-live.is-connected { + color: #047857; + background: #d1fae5; + border-color: 
#6ee7b7; +} + +.mp-net-live.is-establishing { + color: #b45309; + background: #fef3c7; + border-color: #fcd34d; +} + +.mp-net-live.is-waiting { + color: #475569; + background: #e2e8f0; + border-color: #cbd5e1; +} + +.mp-net-links { + display: flex; + flex-direction: column; + gap: 6px; +} + +.mp-link-row { + display: flex; + align-items: center; + gap: 8px; + padding: 7px 8px; + border-radius: 8px; + border: 1px solid #e2e8f0; + background: #ffffff; +} + +.mp-link-row .mp-net-dot { + width: 8px; + height: 8px; + border-radius: 999px; + background: #94a3b8; + flex-shrink: 0; +} + +.mp-link-row.is-connected { + border-color: #6ee7b7; + background: #f0fdf4; +} + +.mp-link-row.is-connected .mp-net-dot { + color: #22c55e; + background: currentColor; + box-shadow: 0 0 0 0 currentColor; + animation: mp-net-pulse 1.4s infinite; +} + +.mp-link-row.is-establishing .mp-net-dot { + color: #f59e0b; + background: currentColor; + box-shadow: 0 0 0 0 currentColor; + animation: mp-net-pulse 1.2s infinite; +} + +.mp-link-end { + font-size: 11px; + font-weight: 700; + color: #0f172a; +} + +.mp-link-arrow { + color: #2563eb; + font-size: 13px; + font-weight: 700; + letter-spacing: 1px; + background: linear-gradient(90deg, #93c5fd 0%, #2563eb 50%, #93c5fd 100%); + -webkit-background-clip: text; + background-clip: text; + -webkit-text-fill-color: transparent; + background-size: 200% auto; + animation: mp-net-flow 1.1s linear infinite; +} + +.mp-link-row.is-waiting .mp-link-arrow { + animation: none; + -webkit-text-fill-color: initial; + background: none; + color: #94a3b8; +} + +.mp-link-meta { + margin-left: auto; + font-size: 11px; + color: #475569; + white-space: nowrap; +} + +.mp-peer-grid { + margin-top: 8px; + display: grid; + grid-template-columns: repeat(auto-fit, minmax(220px, 1fr)); + gap: 8px; +} + +.mp-peer-card { + padding: 8px 9px; + border-radius: 8px; + border: 1px solid #dbeafe; + background: #ffffff; +} + +.mp-peer-card .mp-net-dot { + width: 8px; + height: 8px; + 
border-radius: 999px; + background: #94a3b8; + flex-shrink: 0; +} + +.mp-peer-card.is-connected { + border-color: #86efac; + background: #f0fdf4; +} + +.mp-peer-card.is-connected .mp-net-dot { + background: #22c55e; +} + +.mp-peer-card.is-stale { + border-color: #fcd34d; + background: #fffbeb; +} + +.mp-peer-card.is-stale .mp-net-dot { + background: #f59e0b; +} + +.mp-peer-head { + display: flex; + align-items: center; + gap: 6px; + margin-bottom: 4px; +} + +.mp-peer-name { + font-size: 12px; + font-weight: 700; + color: #0f172a; +} + +.mp-peer-mode { + margin-left: auto; + font-size: 10px; + font-weight: 700; + color: #1d4ed8; + text-transform: uppercase; +} + +.mp-peer-metric-row { + display: flex; + justify-content: space-between; + gap: 8px; + font-size: 11px; + font-weight: 600; + color: #1f2937; +} + +.mp-peer-metric-row.subtle { + font-weight: 500; + color: #64748b; +} + +@keyframes mp-net-pulse { + 0% { + box-shadow: 0 0 0 0 currentColor; + } + 70% { + box-shadow: 0 0 0 8px transparent; + } + 100% { + box-shadow: 0 0 0 0 transparent; + } +} + +@keyframes mp-net-flow { + 0% { + background-position: 0% 50%; + } + 100% { + background-position: 200% 50%; + } +} + +.mp-step-contribs { + margin-top: 8px; + padding-top: 8px; + border-top: 1px dashed #dbeafe; + display: flex; + flex-direction: column; + gap: 6px; +} + +.mp-step-contrib-row { + display: flex; + align-items: center; + justify-content: space-between; + gap: 10px; +} + +.mp-step-contrib-label { + font-size: 11px; + color: #334155; +} + +.mp-contrib-open-btn { + font-size: 10px; + padding: 3px 8px; +} + +/* Tabs container */ +.mp-tabs-container { + margin-top: 16px; +} + +.mp-tabs-header { + display: flex; + gap: 4px; + border-bottom: 2px solid #e5e7eb; + margin-bottom: 16px; +} + +.mp-tab { + padding: 10px 20px; + border: none; + background: transparent; + color: #6b7280; + font-size: 16px; + font-weight: 500; + cursor: pointer; + border-bottom: 2px solid transparent; + margin-bottom: -2px; + 
transition: all 0.2s; +} + +.mp-tab:hover { + color: #374151; + background: #f9fafb; +} + +.mp-tab.active { + color: #3b82f6; + border-bottom-color: #3b82f6; + font-weight: 600; +} + +.mp-tab-content { + display: none; +} + +.mp-tab-content.active { + display: block; +} + +/* Logs content */ +.mp-logs-content { + max-height: 300px; + overflow-y: auto; + padding: 12px; + background: #f9fafb; + border-radius: 8px; + font-family: monospace; + font-size: 12px; +} + +.mp-step-code { + margin-top: 8px; +} + +.mp-step-code summary { + font-size: 13px; + font-weight: 600; + color: #334155; + cursor: pointer; +} + +.mp-step-logs { + margin-top: 8px; +} + +.mp-step-logs summary { + font-size: 13px; + font-weight: 600; + color: #334155; + cursor: pointer; +} + +.mp-code-block { + margin-top: 8px; + padding: 12px; + border-radius: 8px; + border: 1px solid #1e293b; + background: #0f172a; + color: #dbeafe; + font-size: 12px; + line-height: 1.45; + overflow: auto; +} + +.mp-step-log-block { + margin-top: 8px; + padding: 12px; + border-radius: 8px; + border: 1px solid #dbeafe; + background: #0b1220; + color: #cbd5e1; + font-size: 12px; + line-height: 1.45; + max-height: 240px; + overflow: auto; + white-space: pre-wrap; +} + +.mp-log-entry { + display: flex; + gap: 8px; + padding: 4px 0; + border-bottom: 1px solid #e5e7eb; +} + +.mp-log-entry:last-child { + border-bottom: none; +} + +.mp-log-time { + color: #9ca3af; + min-width: 70px; +} + +.mp-log-role { + font-weight: 600; + color: #3b82f6; + min-width: 40px; +} + +.mp-log-event { + color: #374151; +} + +.mp-log-step { + color: #6b7280; + margin-left: auto; +} + +.mp-no-logs { + color: #9ca3af; + text-align: center; + padding: 12px; +} diff --git a/src/data.js b/src/data.js index cc20838a..84460898 100644 --- a/src/data.js +++ b/src/data.js @@ -916,7 +916,7 @@ export function createDataModule({ invoke, dialog, getCurrentUserEmail }) { }) } - // Checkbox handler + // Checkbox handler with shift-click support const checkbox = 
row.querySelector('.file-checkbox') const setFileSelected = (targetId, selected) => { if (selected) { @@ -984,8 +984,15 @@ export function createDataModule({ invoke, dialog, getCurrentUserEmail }) { ) { return } - checkbox.checked = !checkbox.checked - checkbox.dispatchEvent(new Event('change')) + const fileId = parseInt(row.dataset.fileId) + const newChecked = !selectedFileIds.includes(fileId) + checkbox.checked = newChecked + setFileSelected(fileId, newChecked) + updateDeleteButton() + updateSelectAllCheckbox() + updateActionButtons() + syncSelectionToSessionStorage() + lastClickedFileId = fileId }) row.style.cursor = 'pointer' @@ -1243,6 +1250,7 @@ export function createDataModule({ invoke, dialog, getCurrentUserEmail }) { // Clear all file selections function clearAllSelections() { selectedFileIds = [] + lastClickedFileId = null // Remove selected class from all rows document.querySelectorAll('.file-row.selected').forEach((row) => { diff --git a/src/event-handlers.js b/src/event-handlers.js index a1b997af..ac421e31 100644 --- a/src/event-handlers.js +++ b/src/event-handlers.js @@ -224,6 +224,16 @@ export function setupEventHandlers({ sendMessageBtn.addEventListener('click', sendCurrentMessage) } + // Messages - Propose Flow (multiparty) + const proposeFlowBtn = document.getElementById('propose-flow-btn') + if (proposeFlowBtn) { + proposeFlowBtn.addEventListener('click', () => { + if (window.proposeFlowModal) { + window.proposeFlowModal.open() + } + }) + } + // Messages - SyftBox toggle const syftboxToggle = document.getElementById('message-syftbox-toggle') if (syftboxToggle) { diff --git a/src/flows.js b/src/flows.js index 4f07ecab..edaeac5a 100644 --- a/src/flows.js +++ b/src/flows.js @@ -887,8 +887,45 @@ export function createFlowsModule({ invoke, dialog, open: _open, navigateTo, ope // Check why it wasn't handled const context = getPendingDataRunContext() const hasData = hasPendingData(context) + const flow = flowState.flows.find((p) => p.id === flowId) if 
(!hasData) { + if (flow && flow.spec?.inputs && Object.keys(flow.spec.inputs).length > 0) { + const overrides = await promptFlowInputOverridesFromData({ + flowName: flow.name, + flowSpec: flow.spec, + initialOverrides: {}, + }) + if (overrides) { + const missingRequired = Object.entries(flow.spec.inputs || {}) + .filter(([name, spec]) => isRequiredInputSpec(spec)) + .filter(([name]) => { + const key = `inputs.${name}` + return !String(overrides[key] || '').trim() + }) + .map(([name]) => name) + if (missingRequired.length === 0) { + try { + const run = await invoke('run_flow', { + flowId, + inputOverrides: overrides, + resultsDir: null, + }) + if (typeof sessionStorage !== 'undefined') { + sessionStorage.setItem('autoExpandRunId', String(run.id)) + } + alert(`Flow started! Run ID: ${run.id}`) + if (navigateTo) navigateTo('runs') + return + } catch (error) { + console.error('Failed to start flow with input overrides:', error) + alert('Failed to run flow: ' + (error?.message || error)) + return + } + } + } + } + // No data selected - prompt user to select data first if (dialog && dialog.confirm) { const shouldNavigate = await dialog.confirm( @@ -908,7 +945,6 @@ export function createFlowsModule({ invoke, dialog, open: _open, navigateTo, ope } } else { // Data is selected but flow might not be compatible - const flow = flowState.flows.find((p) => p.id === flowId) const selectionShape = context?.datasetShape || 'List[GenotypeRecord]' if (flow && !flowAcceptsShape(flow, selectionShape)) { if (dialog && dialog.message) { @@ -937,6 +973,289 @@ export function createFlowsModule({ invoke, dialog, open: _open, navigateTo, ope return '' } + function extractInputDefaultValue(inputSpec) { + if (inputSpec && typeof inputSpec === 'object' && !Array.isArray(inputSpec)) { + return Object.prototype.hasOwnProperty.call(inputSpec, 'default') + ? 
inputSpec.default + : undefined + } + return undefined + } + + function normalizeInputDefaultValue(defaultValue) { + if (defaultValue === undefined || defaultValue === null) return '' + if (Array.isArray(defaultValue)) { + return defaultValue + .map((value) => String(value).trim()) + .filter(Boolean) + .join(',') + } + if (typeof defaultValue === 'object') { + try { + return JSON.stringify(defaultValue) + } catch { + return '' + } + } + return String(defaultValue) + } + + function parseInputListValue(rawValue) { + if (Array.isArray(rawValue)) { + return rawValue.map((value) => String(value).trim()).filter(Boolean) + } + const text = String(rawValue || '') + .trim() + .replace(/\n/g, ',') + if (!text) return [] + return text + .split(',') + .map((value) => value.trim()) + .filter(Boolean) + } + + function isListInputType(type, defaultValue) { + if (Array.isArray(defaultValue)) return true + const parsed = parseTypeExpr(String(type || '')) + if (parsed?.kind === 'List') return true + const normalized = String(type || '').toLowerCase() + return normalized.includes('list') || normalized.includes('array') || normalized.includes('[') + } + + function isFileInputTypeNode(node) { + if (!node || !node.kind) return false + if (node.kind === 'List') return isFileInputTypeNode(node.inner) + const normalized = String(node.kind || '') + .trim() + .toLowerCase() + return ( + normalized === 'file' || + normalized === 'directory' || + normalized === 'genotype' || + normalized === 'genotyperecord' || + normalized.includes('file') || + normalized.includes('dir') || + normalized.includes('path') || + normalized.includes('dataset') || + normalized.includes('genotype') + ) + } + + function isFileInputType(type) { + const text = String(type || '') + const parsed = parseTypeExpr(text) + if (parsed && isFileInputTypeNode(parsed)) return true + const normalized = text.toLowerCase() + return ( + normalized.includes('file') || + normalized.includes('dir') || + normalized.includes('path') || + 
normalized.includes('dataset') || + normalized.includes('genotype') + ) + } + + function isRequiredInputSpec(inputSpec) { + const inputType = describeInputType(inputSpec) + if (inputType && String(inputType).trim().endsWith('?')) return false + const defaultValue = extractInputDefaultValue(inputSpec) + return defaultValue === undefined || defaultValue === null || String(defaultValue).trim() === '' + } + + async function promptFlowInputOverridesFromData({ + flowName, + flowSpec, + initialOverrides = {}, + includeDatasites = false, + }) { + const inputs = flowSpec?.inputs || {} + const entries = Object.entries(inputs).filter(([name]) => + includeDatasites ? true : name !== 'datasites', + ) + if (entries.length === 0) return { ...initialOverrides } + + let files = [] + try { + const loaded = await invoke('get_files') + files = Array.isArray(loaded) ? loaded : [] + } catch (error) { + console.warn('Failed to load local files for flow input picker:', error) + } + const sortedFiles = files + .filter((file) => file && typeof file.file_path === 'string' && file.file_path.length > 0) + .sort((a, b) => { + const at = Date.parse(a.updated_at || a.created_at || '') || 0 + const bt = Date.parse(b.updated_at || b.created_at || '') || 0 + return bt - at + }) + + const modal = document.createElement('div') + modal.className = 'flow-input-picker-modal' + modal.innerHTML = ` +
+
+
+

Configure Flow Inputs

+

${escapeHtml(flowName || 'Flow')}

+
+
+ +
+ ` + document.body.appendChild(modal) + const body = modal.querySelector('.flow-input-picker-body') + const fieldRefs = [] + + entries.forEach(([name, inputSpec]) => { + const inputKey = `inputs.${name}` + const type = describeInputType(inputSpec) || 'String' + const defaultValue = extractInputDefaultValue(inputSpec) + const defaultText = normalizeInputDefaultValue(defaultValue) + const currentText = String(initialOverrides[inputKey] || defaultText || '').trim() + const listLike = isListInputType(type, defaultValue) + const fileLike = isFileInputType(type) + + const row = document.createElement('div') + row.className = 'flow-input-picker-row' + + const label = document.createElement('label') + label.className = 'flow-input-picker-label' + label.textContent = name + const hint = document.createElement('div') + hint.className = 'flow-input-picker-hint' + hint.textContent = `Type: ${type}` + label.appendChild(hint) + row.appendChild(label) + + if (fileLike && sortedFiles.length > 0) { + const selectedValues = new Set(parseInputListValue(currentText)) + let fileSelect = null + let fileCheckboxes = [] + if (listLike) { + const fileList = document.createElement('div') + fileList.className = 'flow-input-picker-file-list' + sortedFiles.forEach((file) => { + const fullPath = file.file_path + const parts = fullPath.split('/') + const fileName = parts[parts.length - 1] || fullPath + const dataType = file.data_type ? 
` (${file.data_type})` : '' + const item = document.createElement('label') + item.className = 'flow-input-picker-file-item' + const checkbox = document.createElement('input') + checkbox.type = 'checkbox' + checkbox.className = 'flow-input-picker-checkbox' + checkbox.value = fullPath + checkbox.checked = selectedValues.has(fullPath) + const text = document.createElement('span') + text.textContent = `${fileName}${dataType}` + item.appendChild(checkbox) + item.appendChild(text) + fileList.appendChild(item) + fileCheckboxes.push(checkbox) + }) + row.appendChild(fileList) + } else { + fileSelect = document.createElement('select') + fileSelect.className = 'flow-input-picker-select' + const empty = document.createElement('option') + empty.value = '' + empty.textContent = 'Select a local file...' + fileSelect.appendChild(empty) + sortedFiles.forEach((file) => { + const option = document.createElement('option') + const fullPath = file.file_path + const parts = fullPath.split('/') + const fileName = parts[parts.length - 1] || fullPath + const dataType = file.data_type ? ` (${file.data_type})` : '' + option.value = fullPath + option.textContent = `${fileName}${dataType}` + if (selectedValues.has(fullPath)) option.selected = true + fileSelect.appendChild(option) + }) + row.appendChild(fileSelect) + } + + const manual = document.createElement('input') + manual.className = 'flow-input-picker-text' + manual.type = 'text' + manual.value = currentText + manual.placeholder = listLike + ? 'Or enter comma-separated paths/values' + : 'Or enter a custom path/value' + row.appendChild(manual) + + fieldRefs.push({ + name, + inputKey, + fileLike, + listLike, + defaultText, + fileSelect, + fileCheckboxes, + manual, + }) + } else { + const input = document.createElement('input') + input.className = 'flow-input-picker-text' + input.type = 'text' + input.value = currentText + input.placeholder = defaultText ? 
`Default: ${defaultText}` : 'Enter value' + row.appendChild(input) + fieldRefs.push({ + name, + inputKey, + fileLike: false, + listLike, + defaultText, + textInput: input, + }) + } + + body.appendChild(row) + }) + + return new Promise((resolve) => { + const cleanup = () => modal.remove() + const cancel = () => { + cleanup() + resolve(null) + } + const confirm = () => { + const overrides = { ...initialOverrides } + fieldRefs.forEach((field) => { + let value = '' + if (field.fileLike) { + const selected = field.listLike + ? Array.from(field.fileCheckboxes || []) + .filter((checkbox) => checkbox.checked) + .map((checkbox) => checkbox.value) + .filter(Boolean) + : Array.from(field.fileSelect?.selectedOptions || []) + .map((option) => option.value) + .filter(Boolean) + const manualValues = parseInputListValue(field.manual?.value || '') + const combined = selected.length > 0 ? selected : manualValues + value = field.listLike ? combined.join(',') : combined[0] || '' + } else { + value = String(field.textInput?.value || '').trim() + } + if (!value && field.defaultText) value = field.defaultText + if (value) overrides[field.inputKey] = value + else delete overrides[field.inputKey] + }) + cleanup() + resolve(overrides) + } + + modal.querySelector('.flow-input-picker-backdrop')?.addEventListener('click', cancel) + modal.querySelector('.flow-input-picker-cancel')?.addEventListener('click', cancel) + modal.querySelector('.flow-input-picker-confirm')?.addEventListener('click', confirm) + }) + } + function splitTypeTopLevel(value, delimiter) { if (!value) return [] const parts = [] @@ -984,6 +1303,8 @@ export function createFlowsModule({ invoke, dialog, open: _open, navigateTo, ope return 'ParticipantSheet' case 'genotyperecord': return 'GenotypeRecord' + case 'genotype': + return 'GenotypeRecord' case 'biovaultcontext': return 'BiovaultContext' default: @@ -1985,6 +2306,47 @@ export function createFlowsModule({ invoke, dialog, open: _open, navigateTo, ope + + + @@ -2170,6 
+2532,10 @@ export function createFlowsModule({ invoke, dialog, open: _open, navigateTo, ope thalassemia: 'https://github.com/OpenMined/bioscript/blob/main/examples/thalassemia/thalassemia-classifier/flow.yaml', allele_freq: 'https://github.com/OpenMined/biovault/blob/main/flows/allele-freq/flow.yaml', + multiparty: 'https://github.com/OpenMined/biovault/blob/main/flows/multiparty/flow.yaml', + multiparty_allele_freq: + 'https://github.com/OpenMined/biovault/blob/main/flows/multiparty-allele-freq/flow.yaml', + syqure_demo: 'https://github.com/OpenMined/biovault/blob/main/flows/syqure-demo/flow.yaml', } const templateNames = { @@ -2178,6 +2544,9 @@ export function createFlowsModule({ invoke, dialog, open: _open, navigateTo, ope herc2: 'HERC2 Classifier', thalassemia: 'Thalassemia Classifier', allele_freq: 'Allele Frequency', + multiparty: 'Multiparty Demo', + multiparty_allele_freq: 'Multiparty Allele Freq', + syqure_demo: 'Syqure Demo', } // Use local path if available, otherwise use GitHub URL @@ -2518,14 +2887,20 @@ export function createFlowsModule({ invoke, dialog, open: _open, navigateTo, ope if (step.uses) { try { await invoke('delete_module', { moduleId: step.uses }) - } catch (_) {} + } catch (_) { + /* ignore cleanup errors */ + } } } try { await invoke('delete_flow', { flowId: existingFlow.id }) - } catch (_) {} + } catch (_) { + /* ignore cleanup errors */ + } } - } catch (_) {} + } catch (_) { + /* ignore cleanup errors */ + } } async function submitFlowURL(overwrite = false, urlOverride = null) { @@ -2602,6 +2977,7 @@ export function createFlowsModule({ invoke, dialog, open: _open, navigateTo, ope closeImportOptionsModal() let selected = selectedPath + let inferredName = 'this flow' try { if (!selected) { selected = await dialog.open({ @@ -2653,7 +3029,7 @@ export function createFlowsModule({ invoke, dialog, open: _open, navigateTo, ope let flowDir = selected let flowFile = null - let inferredName = lastParentName || fileName || 'imported-flow' + 
inferredName = lastParentName || fileName || 'imported-flow' if (isYamlFile) { if (parentNormalized) { @@ -7150,10 +7526,23 @@ steps:${ } if (startLocal) { - const inputOverrides = {} + let inputOverrides = {} if (flow?.spec?.inputs?.datasites) { inputOverrides['inputs.datasites'] = datasites.join(',') } + if (flow?.spec?.inputs && Object.keys(flow.spec.inputs).length > 0) { + const configured = await promptFlowInputOverridesFromData({ + flowName: flow.name, + flowSpec: flow.spec, + initialOverrides: inputOverrides, + }) + if (configured === null) { + submitBtn.disabled = false + submitBtn.textContent = 'Start Collaborative Run' + return + } + inputOverrides = configured + } await invoke('run_flow', { flowId: flow.id, inputOverrides, diff --git a/src/logs.js b/src/logs.js index 7e693c38..c5b6a5e0 100644 --- a/src/logs.js +++ b/src/logs.js @@ -302,7 +302,7 @@ export function createLogsModule({ invoke }) { 'Peer scan complete:', '[Network Dataset Debug]', 'Dataset scan complete:', - 'sync actions: uploads=0 downloads=0 remote_deletes=0 local_deletes=0 conflicts=0', + 'sync actions:', 'scan_remote: server returned', 'sync reconcile start:', 'SyftBox queue poll', @@ -312,6 +312,7 @@ export function createLogsModule({ invoke }) { 'bundle not cached:', 'updateThreadActivity:', 'GET http://127.0.0.1:7938/v1/sync/status', + 'files, ignored=', ] function filterVerboseLines(text) { diff --git a/src/main.js b/src/main.js index b350f6df..4726e2ad 100644 --- a/src/main.js +++ b/src/main.js @@ -7,6 +7,7 @@ import { createRunsModule } from './runs.js' import { createModulesModule } from './modules.js' import { createFlowsModule } from './flows.js' import { createMessagesModule } from './messages.js' +import { createMultipartyFlowModal, createProposeFlowModal } from './multiparty-flow-modal.js' import { createImportModule } from './import.js' import { createProgressUI } from './progress-ui.js' import { createCltManager } from './clt-manager.js' @@ -187,6 +188,21 @@ const { 
showInviteOptions: showMessagesInviteOptions, } = messagesModule +// Create multiparty flow modal +const multipartyFlowModal = createMultipartyFlowModal({ + invoke, + dialog, +}) + +// Create propose flow modal +const proposeFlowModal = createProposeFlowModal({ + invoke, + dialog, + getCurrentUserEmail, + getThreadParticipants: () => messagesModule.getActiveThreadParticipants(), + sendMessage: (req) => messagesModule.sendMessageToRecipients(req), +}) + // Expose messages module globally for test actions (e.g., notification test button wiring) window.__messagesModule = messagesModule window.__messagesTriggerTest__ = () => { diff --git a/src/messages.js b/src/messages.js index 80d75b40..a6172ce4 100644 --- a/src/messages.js +++ b/src/messages.js @@ -40,12 +40,778 @@ export function createMessagesModule({ let messagesRefreshInterval = null let messagesRefreshInProgress = false let threadActivityMap = new Map() + let threadParticipantsById = new Map() let hasActivityBaseline = false let notificationPermission = 'default' let messageSyncUnlisten = null let notificationApiPromise = null let searchTerm = '' let messageFilter = 'inbox' + + // Color palette for sender bubbles (soft, readable colors) + const SENDER_COLORS = [ + { bg: '#dbeafe', border: '#93c5fd' }, // blue + { bg: '#dcfce7', border: '#86efac' }, // green + { bg: '#fef3c7', border: '#fcd34d' }, // amber + { bg: '#fce7f3', border: '#f9a8d4' }, // pink + { bg: '#e0e7ff', border: '#a5b4fc' }, // indigo + { bg: '#ccfbf1', border: '#5eead4' }, // teal + { bg: '#fee2e2', border: '#fca5a5' }, // red + { bg: '#f3e8ff', border: '#d8b4fe' }, // purple + ] + + // Generate a consistent color index from an email + function getSenderColorIndex(email) { + if (!email) return 0 + let hash = 0 + for (let i = 0; i < email.length; i++) { + hash = ((hash << 5) - hash + email.charCodeAt(i)) | 0 + } + return Math.abs(hash) % SENDER_COLORS.length + } + + function getSenderColor(email) { + return 
SENDER_COLORS[getSenderColorIndex(email)] + } + + function resolveParticipantIdentity(identity) { + const normalized = normalizeEmail(identity) + if (!normalized) return '' + if (normalized.includes('@')) return normalized + const current = normalizeEmail(getCurrentUserEmail()) + const domain = current.includes('@') ? current.split('@')[1] : '' + return domain ? `${normalized}@${domain}` : normalized + } + + function uniqueParticipantEmails(emails) { + return Array.from( + new Set( + (emails || []) + .map((email) => resolveParticipantIdentity(email)) + .filter((email) => email.length > 0), + ), + ) + } + + function describeFlowInputSpec(inputSpec) { + if (typeof inputSpec === 'string') { + return { type: inputSpec, defaultValue: null } + } + if (inputSpec && typeof inputSpec === 'object') { + return { + type: inputSpec.type || inputSpec.raw_type || inputSpec.rawType || 'String', + defaultValue: inputSpec.default ?? null, + } + } + return { type: 'String', defaultValue: null } + } + + function normalizeDefaultInputValue(defaultValue) { + if (defaultValue === null || defaultValue === undefined) return '' + if (Array.isArray(defaultValue)) { + return defaultValue + .map((value) => String(value).trim()) + .filter(Boolean) + .join(',') + } + if (typeof defaultValue === 'object') { + try { + return JSON.stringify(defaultValue) + } catch { + return '' + } + } + return String(defaultValue) + } + + function parseInputList(rawValue) { + if (Array.isArray(rawValue)) { + return rawValue.map((value) => String(value).trim()).filter(Boolean) + } + const text = String(rawValue || '') + .trim() + .replace(/\n/g, ',') + if (!text) return [] + return text + .split(',') + .map((value) => value.trim()) + .filter(Boolean) + } + + function isFileLikeInputType(type) { + const normalized = String(type || '').toLowerCase() + return ( + normalized.includes('file') || + normalized.includes('dir') || + normalized.includes('path') || + normalized.includes('dataset') || + 
normalized.includes('genotype') + ) + } + + function isListLikeInputType(type, defaultValue) { + const normalized = String(type || '').toLowerCase() + if (Array.isArray(defaultValue)) return true + return normalized.includes('list') || normalized.includes('array') || normalized.includes('[') + } + + function buildDefaultInvitationInputOverrides(flowSpec, participants) { + const overrides = {} + const inputs = flowSpec?.spec?.inputs || flowSpec?.inputs || {} + if (inputs.datasites && Array.isArray(participants) && participants.length > 0) { + const datasites = participants + .map((participant) => String(participant?.email || '').trim()) + .filter(Boolean) + if (datasites.length > 0) { + overrides['inputs.datasites'] = datasites.join(',') + } + } + return overrides + } + + function resolveRoleForUser(participants, currentUser) { + if (!Array.isArray(participants) || !currentUser) return null + const entry = participants.find((p) => emailsMatch(p?.email, currentUser)) + const role = String(entry?.role || '').trim() + return role || null + } + + function normalizeTargetList(targets) { + if (targets === null || targets === undefined) return ['all'] + if (Array.isArray(targets)) { + return targets.map((target) => String(target || '').trim()).filter(Boolean) + } + const text = String(targets || '').trim() + return text ? [text] : ['all'] + } + + function isTargetedForRole(targets, role, currentUser) { + const normalizedRole = String(role || '') + .trim() + .toLowerCase() + const normalizedUser = String(currentUser || '') + .trim() + .toLowerCase() + const list = normalizeTargetList(targets).map((target) => + String(target || '') + .trim() + .toLowerCase(), + ) + if (list.length === 0 || list.includes('all') || list.includes('*')) return true + if (normalizedRole && list.includes(normalizedRole)) return true + // Common group aliases used in flow specs (e.g. 
"only: clients") + if (normalizedRole && normalizedRole.startsWith('client')) { + if (list.includes('clients') || list.includes('client')) return true + } + if (normalizedRole === 'aggregator' && list.includes('aggregators')) return true + if (normalizedUser && list.includes(normalizedUser)) return true + return false + } + + function bindingReferencesInput(bindingValue, inputName) { + if (typeof bindingValue === 'string') { + return bindingValue.trim() === `inputs.${inputName}` + } + if (!bindingValue || typeof bindingValue !== 'object') return false + const value = typeof bindingValue.value === 'string' ? bindingValue.value.trim() : '' + return value === `inputs.${inputName}` + } + + function bindingAppliesToRole(bindingValue, role, currentUser) { + if (!bindingValue || typeof bindingValue !== 'object') return true + if (!Object.prototype.hasOwnProperty.call(bindingValue, 'only')) return true + return isTargetedForRole(bindingValue.only, role, currentUser) + } + + function shouldShowInvitationInput({ flowSpec, inputName, participants, currentUser }) { + const steps = flowSpec?.spec?.steps || flowSpec?.steps || [] + if (!Array.isArray(steps) || steps.length === 0) return true + const role = resolveRoleForUser(participants, currentUser) + let referenced = false + let relevant = false + steps.forEach((step) => { + const withMap = step?.with + if (!withMap || typeof withMap !== 'object') return + Object.values(withMap).forEach((bindingValue) => { + if (!bindingReferencesInput(bindingValue, inputName)) return + referenced = true + if ( + bindingAppliesToRole(bindingValue, role, currentUser) && + isTargetedForRole(step?.run?.targets, role, currentUser) + ) { + relevant = true + } + }) + }) + if (!referenced) return true + return relevant + } + + function shouldHideInputForRole(inputName, role, type) { + const normalizedRole = String(role || '') + .trim() + .toLowerCase() + if (!normalizedRole) return false + const normalizedName = String(inputName || '') + .trim() + 
.toLowerCase() + const normalizedType = String(type || '') + .trim() + .toLowerCase() + const isGenotypeInput = + normalizedName.includes('genotype') || normalizedType.includes('genotyperecord') + return isGenotypeInput && normalizedRole.startsWith('aggregator') + } + + function fileLooksLikeGenotype(file) { + const dataType = String(file?.data_type || '').toLowerCase() + if (dataType.includes('genotype')) return true + const path = String(file?.file_path || '').toLowerCase() + return ( + path.endsWith('.txt') || + path.endsWith('.vcf') || + path.endsWith('.vcf.gz') || + path.includes('/genotypes/') + ) + } + + async function promptFlowInputOverrides({ + flowName, + flowSpec, + initialOverrides = {}, + includeDatasites = false, + participants = [], + currentUser = '', + }) { + const inputs = flowSpec?.spec?.inputs || flowSpec?.inputs || {} + const role = resolveRoleForUser(participants, currentUser) + const entries = Object.entries(inputs).filter(([name, inputSpec]) => { + if (!includeDatasites && name === 'datasites') return false + const { type } = describeFlowInputSpec(inputSpec) + if (shouldHideInputForRole(name, role, type)) return false + return shouldShowInvitationInput({ + flowSpec, + inputName: name, + participants, + currentUser, + }) + }) + if (entries.length === 0) { + return { ...initialOverrides } + } + + let files = [] + try { + const loaded = await invoke('get_files') + files = Array.isArray(loaded) ? loaded : [] + } catch (error) { + console.warn('Failed to load local files for flow input picker:', error) + } + + const sortedFiles = files + .filter((file) => file && typeof file.file_path === 'string' && file.file_path.length > 0) + .sort((a, b) => { + const at = Date.parse(a.updated_at || a.created_at || '') || 0 + const bt = Date.parse(b.updated_at || b.created_at || '') || 0 + return bt - at + }) + + const modal = document.createElement('div') + modal.className = 'flow-input-picker-modal' + modal.innerHTML = ` +
+
+
+

Configure Flow Inputs

+

${escapeHtml(flowName || 'Flow')}

+
+
+ +
+ ` + document.body.appendChild(modal) + + const body = modal.querySelector('.flow-input-picker-body') + const fieldRefs = [] + + entries.forEach(([name, inputSpec]) => { + const inputKey = `inputs.${name}` + const { type, defaultValue } = describeFlowInputSpec(inputSpec) + const defaultText = normalizeDefaultInputValue(defaultValue) + const currentText = String(initialOverrides[inputKey] || defaultText || '').trim() + const listLike = isListLikeInputType(type, defaultValue) + const fileLike = isFileLikeInputType(type) + const row = document.createElement('div') + row.className = 'flow-input-picker-row' + + const label = document.createElement('label') + label.className = 'flow-input-picker-label' + label.textContent = name + + const hint = document.createElement('div') + hint.className = 'flow-input-picker-hint' + hint.textContent = `Type: ${type}` + label.appendChild(hint) + row.appendChild(label) + + if (fileLike && sortedFiles.length > 0) { + const selectedValues = new Set(parseInputList(currentText)) + let fileSelect = null + let fileCheckboxes = [] + let selectedValuesSet = null + if (listLike) { + selectedValuesSet = new Set(selectedValues) + if (selectedValuesSet.size === 0 && String(name).toLowerCase().includes('genotype')) { + const genotypeFiles = sortedFiles.filter(fileLooksLikeGenotype) + const preselectFiles = genotypeFiles.length > 0 ? 
genotypeFiles : sortedFiles + preselectFiles.forEach((file) => { + if (file?.file_path) selectedValuesSet.add(file.file_path) + }) + } + let filterTerm = '' + let filterType = '' + let lastClickedVisibleIndex = null + + const controls = document.createElement('div') + controls.className = 'flow-input-picker-controls' + + const searchInput = document.createElement('input') + searchInput.className = 'flow-input-picker-search' + searchInput.type = 'text' + searchInput.placeholder = 'Filter files by name, participant, type, source' + controls.appendChild(searchInput) + + const typeFilter = document.createElement('select') + typeFilter.className = 'flow-input-picker-type-filter' + typeFilter.innerHTML = '' + const allTypes = [ + ...new Set(sortedFiles.map((file) => String(file.data_type || '').trim())), + ] + .filter(Boolean) + .sort((a, b) => a.localeCompare(b)) + allTypes.forEach((type) => { + const option = document.createElement('option') + option.value = type + option.textContent = type + typeFilter.appendChild(option) + }) + controls.appendChild(typeFilter) + + row.appendChild(controls) + + const selectAllRow = document.createElement('label') + selectAllRow.className = 'flow-input-picker-select-all-row' + const selectAllCheckbox = document.createElement('input') + selectAllCheckbox.type = 'checkbox' + selectAllCheckbox.className = 'flow-input-picker-select-all' + const selectAllText = document.createElement('span') + selectAllText.textContent = 'Select all visible' + selectAllRow.appendChild(selectAllCheckbox) + selectAllRow.appendChild(selectAllText) + row.appendChild(selectAllRow) + + const fileList = document.createElement('div') + fileList.className = 'flow-input-picker-file-list' + row.appendChild(fileList) + + const table = document.createElement('table') + table.className = 'flow-input-picker-table' + table.innerHTML = ` + + + + Participant + Filename + Type + Source + + + ` + const tbody = document.createElement('tbody') + table.appendChild(tbody) + 
fileList.appendChild(table) + + const matchesFilter = (file) => { + if (filterType && String(file.data_type || '') !== filterType) return false + if (!filterTerm) return true + const term = filterTerm.toLowerCase() + const values = [ + file.file_path, + file.participant_id, + file.data_type, + file.source, + file.grch_version, + ] + .filter(Boolean) + .map((value) => String(value).toLowerCase()) + return values.some((value) => value.includes(term)) + } + + const updateSelectAllState = (visibleFiles) => { + if (!visibleFiles.length) { + selectAllCheckbox.checked = false + selectAllCheckbox.indeterminate = false + return + } + const selectedCount = visibleFiles.filter((file) => + selectedValuesSet.has(file.file_path), + ).length + selectAllCheckbox.checked = selectedCount === visibleFiles.length + selectAllCheckbox.indeterminate = + selectedCount > 0 && selectedCount < visibleFiles.length + } + + const renderFileRows = () => { + tbody.innerHTML = '' + fileCheckboxes = [] + lastClickedVisibleIndex = null + const visibleFiles = sortedFiles.filter(matchesFilter) + visibleFiles.forEach((file, visibleIndex) => { + const fullPath = file.file_path + const fileName = fullPath.split('/').pop() || fullPath + const rowEl = document.createElement('tr') + rowEl.className = 'flow-input-picker-file-row' + const participant = file.participant_id || '-' + const typeText = file.data_type || '-' + const sourceText = file.source || '-' + rowEl.innerHTML = ` + + ${escapeHtml(participant)} + ${escapeHtml(fileName)} + ${escapeHtml(typeText)} + ${escapeHtml(sourceText)} + ` + const checkbox = rowEl.querySelector('.flow-input-picker-checkbox') + fileCheckboxes.push(checkbox) + const setSelected = (checked) => { + if (checked) selectedValuesSet.add(fullPath) + else selectedValuesSet.delete(fullPath) + checkbox.checked = checked + } + const applyClick = (checked, shiftKey) => { + if (shiftKey && lastClickedVisibleIndex !== null) { + const start = Math.min(lastClickedVisibleIndex, visibleIndex) 
+ const end = Math.max(lastClickedVisibleIndex, visibleIndex) + for (let idx = start; idx <= end; idx += 1) { + const target = visibleFiles[idx] + if (checked) selectedValuesSet.add(target.file_path) + else selectedValuesSet.delete(target.file_path) + } + renderFileRows() + return + } + setSelected(checked) + lastClickedVisibleIndex = visibleIndex + updateSelectAllState(visibleFiles) + } + rowEl.addEventListener('click', (event) => { + if (event.target?.tagName === 'INPUT') return + applyClick(!selectedValuesSet.has(fullPath), event.shiftKey) + }) + checkbox.addEventListener('click', (event) => { + event.stopPropagation() + applyClick(checkbox.checked, event.shiftKey) + }) + tbody.appendChild(rowEl) + }) + updateSelectAllState(visibleFiles) + } + + searchInput.addEventListener('input', () => { + filterTerm = String(searchInput.value || '').trim() + renderFileRows() + }) + typeFilter.addEventListener('change', () => { + filterType = String(typeFilter.value || '').trim() + renderFileRows() + }) + selectAllCheckbox.addEventListener('change', () => { + const visibleFiles = sortedFiles.filter(matchesFilter) + visibleFiles.forEach((file) => { + if (selectAllCheckbox.checked) selectedValuesSet.add(file.file_path) + else selectedValuesSet.delete(file.file_path) + }) + renderFileRows() + }) + + renderFileRows() + } else { + fileSelect = document.createElement('select') + fileSelect.className = 'flow-input-picker-select' + fileSelect.multiple = false + fileSelect.size = 1 + + const empty = document.createElement('option') + empty.value = '' + empty.textContent = 'Select a local file...' + fileSelect.appendChild(empty) + + sortedFiles.forEach((file) => { + const option = document.createElement('option') + const fullPath = file.file_path + const parts = fullPath.split('/') + const fileName = parts[parts.length - 1] || fullPath + const dataType = file.data_type ? 
` (${file.data_type})` : '' + option.value = fullPath + option.textContent = `${fileName}${dataType}` + if (selectedValues.has(fullPath)) option.selected = true + fileSelect.appendChild(option) + }) + row.appendChild(fileSelect) + } + + const manual = document.createElement('input') + manual.className = 'flow-input-picker-text' + manual.type = 'text' + manual.value = currentText + manual.placeholder = listLike + ? 'Or enter comma-separated paths/values' + : 'Or enter a custom path/value' + row.appendChild(manual) + + fieldRefs.push({ + name, + inputKey, + fileLike, + listLike, + defaultText, + fileSelect, + fileCheckboxes, + selectedValuesSet, + manual, + }) + } else { + const input = document.createElement('input') + input.className = 'flow-input-picker-text' + input.type = 'text' + input.value = currentText + input.placeholder = defaultText ? `Default: ${defaultText}` : 'Enter value' + row.appendChild(input) + fieldRefs.push({ + name, + inputKey, + fileLike: false, + listLike, + defaultText, + textInput: input, + }) + } + + body.appendChild(row) + }) + + return new Promise((resolve) => { + const cleanup = () => { + modal.remove() + } + const cancel = () => { + cleanup() + resolve(null) + } + const confirm = () => { + const overrides = { ...initialOverrides } + + fieldRefs.forEach((field) => { + let value = '' + if (field.fileLike) { + const selected = field.listLike + ? Array.from(field.selectedValuesSet || []).filter(Boolean) + : Array.from(field.fileSelect?.selectedOptions || []) + .map((option) => option.value) + .filter(Boolean) + const manualValues = parseInputList(field.manual?.value || '') + const combined = selected.length > 0 ? 
selected : manualValues + if (field.listLike) { + value = combined.join(',') + } else { + value = combined[0] || '' + } + } else { + value = String(field.textInput?.value || '').trim() + } + + if (!value && field.defaultText) { + value = field.defaultText + } + + if (value) { + overrides[field.inputKey] = value + } else { + delete overrides[field.inputKey] + } + }) + + cleanup() + resolve(overrides) + } + + modal.querySelector('.flow-input-picker-backdrop')?.addEventListener('click', cancel) + modal.querySelector('.flow-input-picker-cancel')?.addEventListener('click', cancel) + modal.querySelector('.flow-input-picker-confirm')?.addEventListener('click', confirm) + }) + } + + // Render participant chips with colors for thread header + function renderParticipantChips(participants, currentUserEmail) { + if (!participants || participants.length === 0) return '' + const normalizedParticipants = uniqueParticipantEmails(participants) + const others = normalizedParticipants.filter((p) => !emailsMatch(p, currentUserEmail)) + if (others.length === 0) return '' + + return others + .map((email) => { + const color = getSenderColor(email) + return `${escapeHtml(email)}` + }) + .join('') + } + + // ============================================================================ + // EMAIL CHIP INPUT + // ============================================================================ + + let recipientChips = [] + + function initializeChipInput() { + const recipientInput = document.getElementById('message-recipient-input') + if (!recipientInput) return + + // Create wrapper if not exists + let wrapper = recipientInput.parentElement + if (!wrapper.classList.contains('chip-input-wrapper')) { + wrapper = document.createElement('div') + wrapper.className = 'chip-input-wrapper' + recipientInput.parentElement.insertBefore(wrapper, recipientInput) + wrapper.appendChild(recipientInput) + } + + // Create chips container if not exists + let chipsContainer = wrapper.querySelector('.chips-container') 
+ if (!chipsContainer) { + chipsContainer = document.createElement('div') + chipsContainer.className = 'chips-container' + wrapper.insertBefore(chipsContainer, recipientInput) + } + + // Handle input events + recipientInput.addEventListener('keydown', handleChipInputKeydown) + recipientInput.addEventListener('blur', handleChipInputBlur) + + return { wrapper, chipsContainer } + } + + function handleChipInputKeydown(e) { + const input = e.target + const value = input.value.trim() + + // Comma or Enter adds chip + if ((e.key === ',' || e.key === 'Enter') && value) { + e.preventDefault() + const email = value.replace(/,/g, '').trim() + if (email && isValidEmail(email)) { + addRecipientChip(email) + input.value = '' + } + } + + // Backspace on empty input removes last chip + if (e.key === 'Backspace' && !input.value && recipientChips.length > 0) { + removeRecipientChip(recipientChips.length - 1) + } + } + + function handleChipInputBlur(e) { + const input = e.target + const value = input.value.trim() + if (value && isValidEmail(value)) { + addRecipientChip(value) + input.value = '' + } + } + + function isValidEmail(email) { + return /^[^\s@]+@[^\s@]+\.[^\s@]+$/.test(email) + } + + function addRecipientChip(email) { + // Normalize and check for duplicates (case-insensitive) + const normalizedEmail = email.toLowerCase().trim() + if (recipientChips.some((e) => e.toLowerCase().trim() === normalizedEmail)) return + recipientChips.push(email) + renderRecipientChips() + } + + function removeRecipientChip(index) { + recipientChips.splice(index, 1) + renderRecipientChips() + } + + function renderRecipientChips() { + const recipientInput = document.getElementById('message-recipient-input') + if (!recipientInput) return + + const wrapper = recipientInput.closest('.chip-input-wrapper') + if (!wrapper) return + + let chipsContainer = wrapper.querySelector('.chips-container') + if (!chipsContainer) return + + chipsContainer.innerHTML = recipientChips + .map( + (email, idx) => ` + + `, 
+ ) + .join('') + + // Add click handlers for remove buttons + chipsContainer.querySelectorAll('.chip-remove').forEach((btn) => { + btn.addEventListener('click', (e) => { + e.preventDefault() + e.stopPropagation() + const idx = parseInt(btn.dataset.index, 10) + removeRecipientChip(idx) + }) + }) + + // Update placeholder + recipientInput.placeholder = recipientChips.length > 0 ? 'Add more...' : 'recipient@example.com' + } + + function getRecipientEmails() { + const recipientInput = document.getElementById('message-recipient-input') + const inputValue = recipientInput?.value.trim() || '' + + // Combine chips and any text in input + const emails = [...recipientChips] + if (inputValue && isValidEmail(inputValue)) { + emails.push(inputValue) + } + + return emails + } + + function clearRecipientChips() { + recipientChips = [] + const recipientInput = document.getElementById('message-recipient-input') + if (recipientInput) { + recipientInput.value = '' + } + renderRecipientChips() + } + + function setRecipientChips(emails) { + recipientChips = [...emails] + renderRecipientChips() + } let syftboxAutoStartDisabled = null // Refresh rate: 2s in dev/test mode, 10s in production @@ -126,6 +892,21 @@ export function createMessagesModule({ } } + function getFlowInvitationFromMessage(msg) { + if (!msg) return null + const meta = normalizeMetadata(msg.metadata) + if (!meta || !meta.flow_invitation) return null + const invitation = meta.flow_invitation + if (!invitation.flow_name || !invitation.session_id) return null + return { + flow_name: invitation.flow_name, + session_id: invitation.session_id, + participants: Array.isArray(invitation.participants) ? 
invitation.participants : [], + flow_spec: invitation.flow_spec, + sender: msg.from, + } + } + function parseSyftUrl(syftUrl) { if (!syftUrl || typeof syftUrl !== 'string') return null const match = syftUrl.match(/^syft:\/\/([^/]+)\/(.+)$/i) @@ -489,12 +1270,27 @@ export function createMessagesModule({ function collectParticipants(messages) { const set = new Set() messages.forEach((msg) => { - if (msg.from) set.add(normalizeEmail(msg.from)) - if (msg.to) set.add(normalizeEmail(msg.to)) + if (msg.from) set.add(resolveParticipantIdentity(msg.from)) + if (msg.to) set.add(resolveParticipantIdentity(msg.to)) }) return Array.from(set) } + function collectGroupParticipantsFromMessages(messages) { + const participants = new Set() + ;(messages || []).forEach((msg) => { + const meta = normalizeMetadata(msg?.metadata) + const groupParticipants = meta?.group_chat?.participants + if (!Array.isArray(groupParticipants)) return + groupParticipants.forEach((email) => { + if (typeof email === 'string' && email.trim()) { + participants.add(resolveParticipantIdentity(email)) + } + }) + }) + return Array.from(participants) + } + function formatParticipants(participants) { if (!participants || participants.length === 0) return '' const currentUserEmail = getCurrentUserEmail() @@ -1080,7 +1876,7 @@ export function createMessagesModule({ item.classList.add('unread') } - const participants = thread.participants || [] + const participants = uniqueParticipantEmails(thread.participants || []) const others = participants.filter((p) => !emailsMatch(p, currentUserEmail)) // Check if this is a self-message thread (only participant is current user) @@ -1127,6 +1923,11 @@ export function createMessagesModule({ !thread.session_id && thread.has_module ? 'Module' : '' + const isGroupChat = participants.length >= 3 + const groupBadge = + !thread.session_id && !thread.has_module && isGroupChat + ? 
'Group' + : '' // For session threads, show participants in subject line const sessionParticipantsLine = isSessionThread @@ -1147,6 +1948,7 @@ export function createMessagesModule({ } ${sessionBadge} ${moduleBadge} + ${groupBadge} ${sessionParticipantsLine}
${escapeHtml(thread.last_message_preview || '')}
@@ -1230,12 +2032,19 @@ export function createMessagesModule({ renderModulePanel(messages) const summary = messageThreads.find((thread) => thread.thread_id === threadId) - const participants = summary ? summary.participants : collectParticipants(messages) + const messageParticipants = collectParticipants(messages) + const groupParticipants = collectGroupParticipantsFromMessages(messages) + const participants = Array.from( + new Set([...(summary?.participants || []), ...messageParticipants, ...groupParticipants]), + ) + const normalizedParticipants = uniqueParticipantEmails(participants) + threadParticipantsById.set(threadId, normalizedParticipants) const currentUserEmail = getCurrentUserEmail() // Check if this is a self-message thread const isSelfThread = - participants.length === 1 && emailsMatch(participants[0], currentUserEmail) + normalizedParticipants.length === 1 && + emailsMatch(normalizedParticipants[0], currentUserEmail) // Check if this is a session thread const isSessionThread = !!(summary && summary.session_id) @@ -1257,26 +2066,32 @@ export function createMessagesModule({ const participantsEl = document.getElementById('message-thread-participants') if (participantsEl) { if (isSessionThread) { - const formatted = formatParticipants(participants) - participantsEl.textContent = formatted - ? `Session with ${formatted}` - : 'Secure collaborative session' + const chipsHtml = renderParticipantChips(normalizedParticipants, currentUserEmail) + if (chipsHtml) { + participantsEl.innerHTML = `Session with ${chipsHtml}` + } else { + participantsEl.textContent = 'Secure collaborative session' + } } else if (isSelfThread) { participantsEl.textContent = subjectText && subjectText !== NO_SUBJECT_PLACEHOLDER ? 
subjectText : 'Personal notes' } else { - const formatted = formatParticipants(participants) - participantsEl.textContent = formatted || '' + const chipsHtml = renderParticipantChips(normalizedParticipants, currentUserEmail) + if (chipsHtml) { + participantsEl.innerHTML = chipsHtml + } else { + participantsEl.textContent = '' + } } } - updateConversationAvatar(participants, isSelfThread, isSessionThread) + updateConversationAvatar(normalizedParticipants, isSelfThread, isSessionThread) const recipientInput = document.getElementById('message-recipient-input') if (recipientInput) { recipientInput.readOnly = true if (!preserveComposeDraft) { - recipientInput.value = getPrimaryRecipient(participants) + recipientInput.value = getPrimaryRecipient(normalizedParticipants) } } @@ -1297,6 +2112,7 @@ export function createMessagesModule({ } updateMessagesEmptyState() + updateProposeFlowButton() } function startNewMessage(prefillRecipient = null) { @@ -1316,9 +2132,13 @@ export function createMessagesModule({ const recipientInput = document.getElementById('message-recipient-input') if (recipientInput) { + // Clear any existing chips and set up for new message + clearRecipientChips() + if (prefillRecipient) { + addRecipientChip(prefillRecipient) + } contactAutocomplete.attachToInputs(['message-recipient-input']) recipientInput.readOnly = false - recipientInput.value = prefillRecipient || '' recipientInput.focus() } @@ -1336,6 +2156,7 @@ export function createMessagesModule({ renderMessageThreads() updateMessagesEmptyState() + updateProposeFlowButton() } // ============================================================================ @@ -1345,6 +2166,8 @@ export function createMessagesModule({ async function initializeMessagesTab(forceSync = false) { if (messagesInitialized && !forceSync) return + // Initialize chip input for recipients + initializeChipInput() contactAutocomplete.attachToInputs(['message-recipient-input']) await ensureMessagesAuthorization() @@ -1362,16 +2185,31 
@@ export function createMessagesModule({ } async function sendCurrentMessage() { - const recipientInput = document.getElementById('message-recipient-input') const subjectInput = document.getElementById('message-compose-subject') const bodyInput = document.getElementById('message-compose-body') - const recipient = recipientInput?.value.trim() const subject = subjectInput?.value.trim() const body = bodyInput?.value.trim() - if (!recipient) { - alert('Please enter a recipient') + // Get recipients - from chips if composing new, from thread if replying + let recipients = [] + if (isComposingNewMessage) { + recipients = getRecipientEmails() + } else if (activeThreadId) { + // Replying to existing thread - use thread participants (excluding self) + const currentUser = getCurrentUserEmail() + const thread = messageThreads.find((t) => t.thread_id === activeThreadId) + const knownParticipants = + threadParticipantsById.get(activeThreadId) || thread?.participants || [] + if (knownParticipants.length > 0) { + recipients = knownParticipants.filter((p) => !emailsMatch(p, currentUser)) + } + } + + recipients = uniqueParticipantEmails(recipients) + + if (recipients.length === 0) { + alert('Please enter at least one recipient') return } if (!body) { @@ -1386,54 +2224,59 @@ export function createMessagesModule({ return } - // Check if recipient has a key in our contacts - const contactCheck = await invoke('key_check_contact', { email: recipient }) - - if (!contactCheck.has_key) { - // No key locally - check if they're on the network - if (contactCheck.is_on_network) { - // They're on network but not trusted - prompt to add them first - const goToNetwork = await dialog.ask( - `${recipient} is on the BioVault network but you haven't added them to your contacts yet.\n\nGo to Network tab to add and verify their key before messaging.`, - { - title: 'Contact Not Added', - kind: 'warning', - okLabel: 'Go to Network', - cancelLabel: 'Cancel', - }, - ) - if (goToNetwork) { - // Navigate to 
network tab - const event = new CustomEvent('navigate-to-tab', { detail: { tab: 'network' } }) - window.dispatchEvent(event) - } - return - } else { - // Not on network at all - show invite modal - const sendInvite = await dialog.ask( - `${recipient} doesn't appear to be on the BioVault network yet.\n\nWould you like to invite them?`, - { - title: 'Recipient Not Found', - kind: 'info', - okLabel: 'Send Invite', - cancelLabel: 'Cancel', - }, - ) - if (sendInvite) { - await showInviteOptions('message') + for (const recipient of recipients) { + const contactCheck = await invoke('key_check_contact', { email: recipient }) + + if (!contactCheck.has_key) { + if (contactCheck.is_on_network) { + const goToNetwork = await dialog.ask( + `${recipient} is on the BioVault network but you haven't added them to your contacts yet.\n\nGo to Network tab to add and verify their key before messaging.`, + { + title: 'Contact Not Added', + kind: 'warning', + okLabel: 'Go to Network', + cancelLabel: 'Cancel', + }, + ) + if (goToNetwork) { + const event = new CustomEvent('navigate-to-tab', { detail: { tab: 'network' } }) + window.dispatchEvent(event) + } + return + } else { + const sendInvite = await dialog.ask( + `${recipient} doesn't appear to be on the BioVault network yet.\n\nWould you like to invite them?`, + { + title: 'Recipient Not Found', + kind: 'info', + okLabel: 'Send Invite', + cancelLabel: 'Cancel', + }, + ) + if (sendInvite) { + await showInviteOptions('message') + } + return } - return } } - const sent = await invoke('send_message', { - request: { - to: recipient, - subject: subject || NO_SUBJECT_PLACEHOLDER, - body, - reply_to: messageReplyTargetId, - }, - }) + const request = + recipients.length === 1 + ? 
{ + to: recipients[0], + subject: subject || NO_SUBJECT_PLACEHOLDER, + body, + reply_to: messageReplyTargetId, + } + : { + recipients, + subject: subject || NO_SUBJECT_PLACEHOLDER, + body, + reply_to: messageReplyTargetId, + } + + const sent = await invoke('send_message', { request }) const threadKey = sent.thread_id || sent.id @@ -1445,6 +2288,8 @@ export function createMessagesModule({ } if (bodyInput) bodyInput.value = '' + // Clear recipient chips after successful send + clearRecipientChips() } catch (error) { console.error('Failed to send message:', error) alert(`Failed to send: ${error}`) @@ -1750,12 +2595,38 @@ export function createMessagesModule({ return } + // Deduplicate messages by ID and by content+sender (with relaxed timestamp matching) + const seenIds = new Set() + const seenContent = new Set() + const dedupedMessages = messages.filter((msg) => { + // Check by ID first + if (msg.id) { + if (seenIds.has(msg.id)) return false + seenIds.add(msg.id) + } + // Normalize timestamp to minute precision to catch duplicates with slight time differences + let normalizedTime = '' + if (msg.created_at) { + try { + const d = new Date(msg.created_at) + normalizedTime = `${d.getFullYear()}-${d.getMonth()}-${d.getDate()}-${d.getHours()}-${d.getMinutes()}` + } catch { + normalizedTime = msg.created_at + } + } + // Also check by content hash to catch duplicates with different IDs + const contentKey = `${msg.from || ''}|${msg.body || ''}|${normalizedTime}` + if (seenContent.has(contentKey)) return false + seenContent.add(contentKey) + return true + }) + // Group consecutive messages from the same sender, with date awareness const groups = [] let currentGroup = null let lastDateKey = null - messages.forEach((msg, index) => { + dedupedMessages.forEach((msg, index) => { const isOutgoing = emailsMatch(msg.from, currentUser) const isSelfMessage = emailsMatch(msg.from, msg.to) || @@ -1826,6 +2697,13 @@ export function createMessagesModule({ if (!isFirst && !isLast) 
bubbleClass += ' middle' msgDiv.className = bubbleClass + // Apply consistent color to incoming messages (not outgoing, not self) + if (!group.isOutgoing && !group.isSelfMessage) { + const senderColor = getSenderColor(msg.from) + msgDiv.style.backgroundColor = senderColor.bg + msgDiv.style.borderColor = senderColor.border + } + // Message body const body = document.createElement('div') body.className = 'message-bubble-body' @@ -1893,6 +2771,9 @@ export function createMessagesModule({ let runActions = null let runButtons = null let joinBtn = null + let runSelect = null + let runSelectOpenBtn = null + let runSelectSendBtn = null const updateRunButtons = (flow) => { if (!runButtons) return @@ -1906,7 +2787,65 @@ export function createMessagesModule({ } } - if (!group.isOutgoing) { + const refreshFlowRequestActions = async () => { + if (!runActions) return + try { + const [flows, runs] = await Promise.all([ + invoke('get_flows'), + invoke('get_flow_runs'), + ]) + const flow = (flows || []).find((p) => p?.name === flowRequest.flow_name) + updateRunButtons(flow) + + if (!runSelect || !runSelectOpenBtn || !runSelectSendBtn) return + + if (!flow) { + runSelect.__runMap = null + runSelect.__flowRef = null + runSelect.innerHTML = '' + runSelect.disabled = true + runSelectOpenBtn.style.display = 'none' + runSelectSendBtn.style.display = 'none' + return + } + + const matchingRuns = (runs || []).filter( + (run) => run.flow_id === flow.id && run.status === 'success', + ) + if (matchingRuns.length === 0) { + runSelect.__runMap = new Map() + runSelect.__flowRef = flow + runSelect.innerHTML = '' + runSelect.disabled = true + runSelectOpenBtn.style.display = 'none' + runSelectSendBtn.style.display = 'none' + return + } + + runSelect.__runMap = new Map(matchingRuns.map((run) => [run.id, run])) + runSelect.__flowRef = flow + runSelect.innerHTML = matchingRuns + .map( + (run) => + ``, + ) + .join('') + runSelect.disabled = false + runSelectOpenBtn.style.display = '' + 
runSelectSendBtn.style.display = '' + } catch (error) { + console.error('Failed to refresh flow request actions:', error) + if (runSelect) { + runSelect.innerHTML = '' + } + } + } + + const currentUser = getCurrentUserEmail() + const requestSender = flowRequest.sender || msg.from + const isRequestSender = emailsMatch(requestSender, currentUser) + + if (!group.isOutgoing && !isRequestSender) { const syncBtn = document.createElement('button') syncBtn.className = 'secondary' syncBtn.textContent = 'Sync Request' @@ -1966,13 +2905,7 @@ export function createMessagesModule({ { title: 'Flow Imported', type: 'info' }, ) - try { - const flows = await invoke('get_flows') - const flow = (flows || []).find((p) => p?.name === flowRequest.flow_name) - updateRunButtons(flow) - } catch (error) { - console.warn('Failed to refresh flow availability:', error) - } + await refreshFlowRequestActions() } catch (error) { console.error('Failed to import flow:', error) await dialog.message('Failed to import flow: ' + (error?.message || error), { @@ -2073,13 +3006,33 @@ export function createMessagesModule({ joinBtn.className = 'secondary' joinBtn.disabled = true joinBtn.addEventListener('click', async () => { - const flow = runButtons?.flow + let flow = runButtons?.flow if (!flow) { - await dialog.message('Import the flow first before joining.', { - title: 'Flow Required', - type: 'warning', - }) - return + // Self-heal: try import+refresh so join does not look stuck. 
+ try { + if (flowRequest.flow_location) { + await invoke('import_flow_from_request', { + name: flowRequest.flow_name, + flowLocation: flowRequest.flow_location, + overwrite: false, + }) + } + } catch (error) { + console.warn('Auto-import before join failed:', error) + } + for (let i = 0; i < 8; i++) { + await refreshFlowRequestActions() + flow = runButtons?.flow + if (flow) break + await new Promise((r) => setTimeout(r, 400)) + } + if (!flow) { + await dialog.message('Import the flow first before joining.', { + title: 'Flow Required', + type: 'warning', + }) + return + } } const inputOverrides = {} if ( @@ -2089,13 +3042,31 @@ export function createMessagesModule({ ) { inputOverrides['inputs.datasites'] = flowRequest.datasites.join(',') } + const selectedOverrides = await promptFlowInputOverrides({ + flowName: flowRequest.flow_name || flow?.name || 'Flow', + flowSpec: flow, + initialOverrides: inputOverrides, + includeDatasites: false, + }) + if (!selectedOverrides) { + joinBtn.disabled = false + joinBtn.textContent = 'Join Run' + return + } try { + joinBtn.disabled = true + joinBtn.textContent = 'Joining...' 
await invoke('run_flow', { flowId: flow.id, - inputOverrides, + inputOverrides: selectedOverrides, runId: flowRequest.run_id, }) + await refreshFlowRequestActions() + joinBtn.textContent = 'Join Run' + joinBtn.disabled = false } catch (error) { + joinBtn.disabled = false + joinBtn.textContent = 'Join Run' console.error('Failed to start collaborative run:', error) await dialog.message( `Failed to start collaborative run: ${error?.message || error}`, @@ -2111,18 +3082,18 @@ export function createMessagesModule({ const resultsActions = document.createElement('div') resultsActions.className = 'invite-actions' - const runSelect = document.createElement('select') + runSelect = document.createElement('select') runSelect.className = 'form-control' runSelect.style.flex = '1' runSelect.innerHTML = '' runSelect.disabled = true - const openBtn = document.createElement('button') - openBtn.textContent = 'Show in Finder' - openBtn.className = 'secondary' - openBtn.disabled = true - openBtn.style.display = 'none' - openBtn.addEventListener('click', async () => { + runSelectOpenBtn = document.createElement('button') + runSelectOpenBtn.textContent = 'Show in Finder' + runSelectOpenBtn.className = 'secondary' + runSelectOpenBtn.disabled = true + runSelectOpenBtn.style.display = 'none' + runSelectOpenBtn.addEventListener('click', async () => { const runId = parseInt(runSelect.value, 10) if (!runId) return const run = runSelect.__runMap?.get(runId) @@ -2146,11 +3117,11 @@ export function createMessagesModule({ } }) - const sendBtn = document.createElement('button') - sendBtn.textContent = 'Send Back' - sendBtn.disabled = true - sendBtn.style.display = 'none' - sendBtn.addEventListener('click', async () => { + runSelectSendBtn = document.createElement('button') + runSelectSendBtn.textContent = 'Send Back' + runSelectSendBtn.disabled = true + runSelectSendBtn.style.display = 'none' + runSelectSendBtn.addEventListener('click', async () => { const runId = parseInt(runSelect.value, 10) if 
(!runId) return @@ -2368,7 +3339,7 @@ export function createMessagesModule({ } try { - sendBtn.disabled = true + runSelectSendBtn.disabled = true await invoke('send_flow_request_results', { requestId: msg.id, runId, @@ -2386,76 +3357,40 @@ export function createMessagesModule({ type: 'error', }) } finally { - sendBtn.disabled = false + runSelectSendBtn.disabled = false } }) }) resultsActions.appendChild(runSelect) - resultsActions.appendChild(openBtn) - resultsActions.appendChild(sendBtn) + resultsActions.appendChild(runSelectOpenBtn) + resultsActions.appendChild(runSelectSendBtn) requestCard.appendChild(resultsActions) - ;(async () => { - try { - const [flows, runs] = await Promise.all([ - invoke('get_flows'), - invoke('get_flow_runs'), - ]) - const flow = (flows || []).find((p) => p?.name === flowRequest.flow_name) - updateRunButtons(flow) - if (!flow) { - runSelect.innerHTML = '' - return - } - - const matchingRuns = (runs || []).filter( - (run) => run.flow_id === flow.id && run.status === 'success', - ) - if (matchingRuns.length === 0) { - runSelect.innerHTML = '' - return - } - runSelect.__runMap = new Map(matchingRuns.map((run) => [run.id, run])) - runSelect.__flowRef = flow - runSelect.innerHTML = matchingRuns - .map( - (run) => - ``, - ) - .join('') - runSelect.disabled = false - openBtn.style.display = '' - sendBtn.style.display = '' - - const updateActionState = async () => { - const runId = parseInt(runSelect.value, 10) - const hasSelection = Number.isFinite(runId) - openBtn.disabled = !hasSelection - let hasResults = false - if (hasSelection) { - const run = runSelect.__runMap?.get(runId) - const resultsDir = run?.results_dir || run?.work_dir - if (resultsDir) { - try { - const entries = await invoke('list_results_tree', { root: resultsDir }) - hasResults = (entries || []).some((entry) => !entry.is_dir) - } catch { - hasResults = false - } - } + const updateActionState = async () => { + const runId = parseInt(runSelect.value, 10) + const hasSelection = 
Number.isFinite(runId) + runSelectOpenBtn.disabled = !hasSelection + let hasResults = false + if (hasSelection) { + const run = runSelect.__runMap?.get(runId) + const resultsDir = run?.results_dir || run?.work_dir + if (resultsDir) { + try { + const entries = await invoke('list_results_tree', { root: resultsDir }) + hasResults = (entries || []).some((entry) => !entry.is_dir) + } catch { + hasResults = false } - sendBtn.disabled = !hasSelection || !hasResults - sendBtn.style.display = hasResults ? '' : 'none' } - runSelect.addEventListener('change', updateActionState) - updateActionState() - } catch (error) { - console.error('Failed to load flow runs:', error) - runSelect.innerHTML = '' } + runSelectSendBtn.disabled = !hasSelection || !hasResults + runSelectSendBtn.style.display = hasResults ? '' : 'none' + } + runSelect.addEventListener('change', updateActionState) + ;(async () => { + await refreshFlowRequestActions() + await updateActionState() })() } @@ -2563,6 +3498,555 @@ export function createMessagesModule({ msgDiv.appendChild(resultsCard) } + // Flow invitation card + const flowInvitation = getFlowInvitationFromMessage(msg) + if (flowInvitation) { + const invitationCard = document.createElement('div') + invitationCard.className = 'flow-invitation-card' + + // Find the current user's role + const currentUser = getCurrentUserEmail() + const myParticipant = flowInvitation.participants.find((p) => + emailsMatch(p.email, currentUser), + ) + const myRole = myParticipant?.role || null + + const participantsHtml = flowInvitation.participants + .map((p) => { + const isMe = emailsMatch(p.email, currentUser) + return `👤 ${escapeHtml(p.email)} (${escapeHtml(p.role)})${isMe ? ' ← you' : ''}` + }) + .join(' ') + + invitationCard.innerHTML = ` +
+ 🔄 + ${escapeHtml(flowInvitation.flow_name)} +
+ ${myRole ? `
Your role: ${escapeHtml(myRole)}
` : ''} +
${participantsHtml}
+
+ ` + + const actions = document.createElement('div') + actions.className = 'flow-invitation-actions' + + const statusEl = invitationCard.querySelector('.flow-invitation-status') + + // Check if this exact flow name exists locally. + // Do not infer from metadata name to avoid false positives. + const checkFlowExists = async () => { + try { + const flows = await invoke('get_flows') + return (flows || []).some((f) => f.name === flowInvitation.flow_name) + } catch { + return false + } + } + + const invitationSender = + flowInvitation?.proposed_by || + flowInvitation?.sender || + flowInvitation?.from || + msg.from + const flowLocation = + flowInvitation?.flow_location || + (invitationSender && flowInvitation?.session_id + ? `syft://${invitationSender}/shared/flows/${flowInvitation.flow_name}/${flowInvitation.session_id}/_flow_source` + : null) + + const syncFlowLocation = async (location) => { + const parsed = parseSyftUrl(location) + if (!parsed?.datasite || !parsed?.path) return + const rawPath = String(parsed.path || '').replace(/^\/+/, '') + const parentPath = `${parsed.datasite}/${rawPath.replace(/\/_flow_source\/?$/i, '')}` + const sourcePath = `${parsed.datasite}/${rawPath}` + try { + await invoke('sync_tree_set_subscription', { + path: parentPath, + allow: true, + isDir: true, + }) + } catch (error) { + console.warn('[Flow Import] Failed to set parent subscription:', error) + } + try { + await invoke('sync_tree_set_subscription', { + path: sourcePath, + allow: true, + isDir: true, + }) + } catch (error) { + console.warn('[Flow Import] Failed to set source subscription:', error) + } + try { + await invoke('trigger_syftbox_sync') + } catch {} + } + + const waitForFlowSourceReady = async (location, maxAttempts = 30) => { + let localRoot = null + try { + localRoot = await invoke('resolve_syft_url_to_local_path', { syftUrl: location }) + } catch (error) { + console.warn('[Flow Import] Failed to resolve syft url:', error) + return false + } + if (!localRoot) return 
false + + for (let attempt = 1; attempt <= maxAttempts; attempt += 1) { + try { + const rootExists = await invoke('path_exists', { path: localRoot }) + const flowYamlExists = await invoke('path_exists', { + path: `${localRoot}/flow.yaml`, + }) + const flowYmlExists = await invoke('path_exists', { + path: `${localRoot}/flow.yml`, + }) + if (rootExists && (flowYamlExists || flowYmlExists)) { + return true + } + } catch (error) { + console.warn('[Flow Import] Failed while checking source path:', error) + } + await syncFlowLocation(location) + await new Promise((resolve) => setTimeout(resolve, 900)) + } + return false + } + + const withTimeout = async (promise, ms, label) => { + let timeoutId + const timeoutPromise = new Promise((_, reject) => { + timeoutId = setTimeout(() => { + reject(new Error(`${label} timed out after ${ms}ms`)) + }, ms) + }) + try { + return await Promise.race([promise, timeoutPromise]) + } finally { + clearTimeout(timeoutId) + } + } + + const getLocalFlowSourcePath = async (location) => { + try { + return await invoke('resolve_syft_url_to_local_path', { syftUrl: location }) + } catch { + return null + } + } + + const importFlowFromInvitation = async (overwrite = false) => { + const flowSpec = flowInvitation.flow_spec + if (flowLocation) { + if (statusEl) statusEl.textContent = 'Syncing flow files...' + await syncFlowLocation(flowLocation) + const sourceReady = await waitForFlowSourceReady(flowLocation, 30) + if (!sourceReady) { + const localPath = await getLocalFlowSourcePath(flowLocation) + if (statusEl && localPath) { + statusEl.textContent = `⚠ Flow source not ready at: ${localPath}` + } + throw new Error( + localPath + ? `Flow source not available yet at ${localPath}. Please retry.` + : 'Flow source not available yet. Please retry in a few seconds.', + ) + } + let lastError = null + for (let attempt = 1; attempt <= 20; attempt += 1) { + try { + if (statusEl) { + statusEl.textContent = `Importing flow... 
(attempt ${attempt}/20)` + } + return await withTimeout( + invoke('import_flow_from_request', { + name: flowInvitation.flow_name, + flowLocation, + overwrite, + }), + 45000, + 'import_flow_from_request', + ) + } catch (error) { + lastError = error + console.warn( + `[Flow Import] import_flow_from_request attempt ${attempt}/20 failed:`, + error, + ) + // Flow source files can arrive slightly later than the invitation message. + // Nudge sync and retry before giving up. + await waitForFlowSourceReady(flowLocation, 1) + await syncFlowLocation(flowLocation) + await new Promise((resolve) => setTimeout(resolve, 900)) + } + } + throw ( + lastError || + new Error( + 'Flow source was not available yet. Please sync and try importing again.', + ) + ) + } + if (!flowSpec) throw new Error('No flow specification in invitation') + return await invoke('import_flow_from_json', { + request: { + name: flowInvitation.flow_name, + flow_json: flowSpec, + overwrite, + }, + }) + } + + const importBtn = document.createElement('button') + importBtn.className = 'flow-invitation-btn import-btn' + importBtn.textContent = '📥 Import Flow' + + const joinBtn = document.createElement('button') + joinBtn.className = 'flow-invitation-btn view-runs-btn' + joinBtn.textContent = '🤝 Join Flow' + joinBtn.style.display = 'none' + + const syncBtn = document.createElement('button') + syncBtn.className = 'flow-invitation-btn' + syncBtn.textContent = '🔄 Sync Flow Files' + syncBtn.addEventListener('click', async () => { + if (!flowLocation) { + if (statusEl) statusEl.textContent = '⚠ No flow location available in invitation.' + return + } + syncBtn.disabled = true + const originalText = syncBtn.textContent + syncBtn.textContent = 'Syncing...' + try { + if (statusEl) statusEl.textContent = 'Syncing flow files...' 
+ await syncFlowLocation(flowLocation) + const sourceReady = await waitForFlowSourceReady(flowLocation, 30) + if (sourceReady) { + if (statusEl) statusEl.textContent = '✓ Flow files synced and ready to import' + } else { + const localPath = await getLocalFlowSourcePath(flowLocation) + if (statusEl) { + statusEl.textContent = localPath + ? `⚠ Sync requested, but flow files are not ready yet at ${localPath}` + : '⚠ Sync requested, but flow files are not ready yet. Try again shortly.' + } + } + } catch (error) { + console.error('Failed to sync flow files:', error) + if (statusEl) statusEl.textContent = `⚠ Sync failed: ${error}` + } finally { + syncBtn.disabled = false + syncBtn.textContent = originalText + } + }) + + const showFilesBtn = document.createElement('button') + showFilesBtn.className = 'flow-invitation-btn' + showFilesBtn.textContent = '📂 Show Flow Files' + showFilesBtn.addEventListener('click', async () => { + try { + if (flowLocation) { + await syncFlowLocation(flowLocation) + const folderPath = await invoke('resolve_syft_url_to_local_path', { + syftUrl: flowLocation, + }) + const exists = await invoke('path_exists', { path: folderPath }) + if (!exists) { + if (statusEl) { + statusEl.textContent = + '⚠ Flow folder not synced yet. Click again after a few seconds.' + } + return + } + await invoke('open_folder', { path: folderPath }) + if (statusEl) statusEl.textContent = `📂 Opened: ${folderPath}` + return + } + + const flows = await invoke('get_flows') + const match = (flows || []).find((f) => f.name === flowInvitation.flow_name) + if (match?.flow_path) { + await invoke('open_folder', { path: match.flow_path }) + if (statusEl) statusEl.textContent = `📂 Opened: ${match.flow_path}` + return + } + if (statusEl) statusEl.textContent = '⚠ Could not locate flow files on disk.' 
+ } catch (error) { + console.error('Failed to open flow files:', error) + if (statusEl) statusEl.textContent = `⚠ Failed to open flow files: ${error}` + } + }) + + importBtn.addEventListener('click', async () => { + console.log('[Flow Import] Button clicked') + importBtn.disabled = true + importBtn.textContent = 'Importing...' + let importCompleted = false + try { + console.log( + '[Flow Import] Flow spec:', + JSON.stringify(flowInvitation.flow_spec, null, 2).substring(0, 500), + ) + console.log('[Flow Import] Calling import_flow_from_json...') + const result = await importFlowFromInvitation(false) + console.log('[Flow Import] Success:', result) + + // Update UI + importBtn.style.display = 'none' + joinBtn.style.display = 'inline-block' + if (statusEl) statusEl.textContent = '✓ Flow imported' + importCompleted = true + } catch (error) { + const errText = String(error?.message || error || '') + const alreadyExists = /already exists/i.test(errText) + if (alreadyExists) { + let shouldOverwrite = false + if (dialog?.confirm) { + shouldOverwrite = await dialog.confirm( + `Flow "${flowInvitation.flow_name}" already exists. Overwrite flow and submodules?`, + { + title: 'Overwrite Flow', + kind: 'warning', + }, + ) + } + + if (shouldOverwrite) { + try { + importBtn.textContent = 'Overwriting...' 
+ const result = await importFlowFromInvitation(true) + console.log('[Flow Import] Overwrite success:', result) + importBtn.style.display = 'none' + joinBtn.style.display = 'inline-block' + if (statusEl) statusEl.textContent = '✓ Flow overwritten' + importCompleted = true + return + } catch (overwriteErr) { + console.error('[Flow Import] Overwrite failed:', overwriteErr) + importBtn.disabled = false + importBtn.textContent = '📥 Import Flow' + if (statusEl) statusEl.textContent = `⚠ Import failed: ${overwriteErr}` + return + } + } + } + + console.error('[Flow Import] Failed:', error) + importBtn.disabled = false + importBtn.textContent = '📥 Import Flow' + if (statusEl) statusEl.textContent = `⚠ Import failed: ${error}` + } finally { + if (!importCompleted) { + importBtn.disabled = false + if (importBtn.textContent !== '📥 Import Flow') { + importBtn.textContent = '📥 Import Flow' + } + } + } + }) + + joinBtn.addEventListener('click', async () => { + // If already joined, just navigate to Runs + if (joinBtn.classList.contains('joined')) { + // Recover from stale "joined but not imported" state. + const exists = await checkFlowExists() + if (!exists) { + try { + if (statusEl) statusEl.textContent = 'Importing flow...' + await importFlowFromInvitation(false) + if (statusEl) statusEl.textContent = '✓ Flow available' + } catch (error) { + if (dialog?.message) { + await dialog.message(`Flow is missing locally: ${error}`, { + title: 'Import Required', + kind: 'error', + }) + } + return + } + } + const event = new CustomEvent('navigate-to-tab', { detail: { tab: 'runs' } }) + window.dispatchEvent(event) + return + } + + try { + joinBtn.disabled = true + joinBtn.textContent = 'Joining...' + + // Safety: ensure local flow import exists before accepting session. + // This prevents "View/Join without import" broken state. + let flowExists = await checkFlowExists() + if (!flowExists) { + if (statusEl) statusEl.textContent = 'Importing flow...' 
+ await importFlowFromInvitation(false) + flowExists = await checkFlowExists() + if (!flowExists) { + throw new Error('Flow import did not complete locally') + } + } + + const initialOverrides = buildDefaultInvitationInputOverrides( + flowInvitation.flow_spec, + flowInvitation.participants, + ) + const selectedOverrides = await promptFlowInputOverrides({ + flowName: flowInvitation.flow_name, + flowSpec: flowInvitation.flow_spec, + initialOverrides, + includeDatasites: false, + participants: flowInvitation.participants, + currentUser, + }) + if (!selectedOverrides) { + joinBtn.disabled = false + joinBtn.textContent = '🤝 Join Flow' + if (statusEl) statusEl.textContent = 'Join canceled' + return + } + + // Accept the invitation (backend only, no modal) + const result = await invoke('accept_flow_invitation', { + sessionId: flowInvitation.session_id, + flowName: flowInvitation.flow_name, + flowSpec: flowInvitation.flow_spec, + participants: flowInvitation.participants, + autoRunAll: false, + threadId: + (activeThreadId && String(activeThreadId).trim()) || + (msg?.thread_id && String(msg.thread_id).trim()) || + (flowInvitation?.thread_id && String(flowInvitation.thread_id).trim()) || + null, + inputOverrides: selectedOverrides, + }) + + console.log('[Join Flow] Accepted:', result) + + // Update button to "View Flow" and keep it clickable + joinBtn.textContent = '📋 View Flow' + joinBtn.classList.add('joined') + joinBtn.disabled = false + + // Hide Decline button after joining + declineBtn.style.display = 'none' + if (statusEl) statusEl.textContent = '✓ Joined flow' + } catch (error) { + console.error('Failed to accept flow invitation:', error) + joinBtn.disabled = false + joinBtn.textContent = '🤝 Join Flow' + if (dialog?.message) { + await dialog.message(`Failed to join flow: ${error}`, { + title: 'Error', + kind: 'error', + }) + } + } + }) + + actions.appendChild(importBtn) + actions.appendChild(joinBtn) + actions.appendChild(syncBtn) + actions.appendChild(showFilesBtn) 
+ + const declineBtn = document.createElement('button') + declineBtn.className = 'flow-invitation-btn decline-btn' + declineBtn.textContent = 'Decline' + + // Hide decline button if user is the sender (proposer) + const isSender = emailsMatch(invitationSender, currentUser) + if (isSender) { + declineBtn.style.display = 'none' + } + + declineBtn.addEventListener('click', async () => { + if (dialog?.confirm) { + const confirmed = await dialog.confirm('Decline this flow invitation?', { + title: 'Decline Invitation', + kind: 'warning', + }) + if (confirmed) { + // Hide the invitation card + invitationCard.innerHTML = ` +
+ ❌ You declined this flow invitation +
+ ` + invitationCard.classList.add('declined') + } + } + }) + actions.appendChild(declineBtn) + + invitationCard.appendChild(actions) + msgDiv.appendChild(invitationCard) + + // Check if flow already exists and if user already joined. + // Keep "Import Flow" visible when joined but not imported locally. + checkFlowExists().then(async (exists) => { + let alreadyJoined = false + try { + const state = await invoke('get_multiparty_flow_state', { + sessionId: flowInvitation.session_id, + }) + alreadyJoined = !!(state && state.session_id && state.run_id) + } catch (e) { + console.log('[Flow Invitation] Session not found, user can join') + } + + // Sender already has the flow source; never force "Import Flow" on proposer cards. + if (isSender) { + importBtn.style.display = 'none' + joinBtn.style.display = 'inline-block' + declineBtn.style.display = 'none' + const joinedLocally = alreadyJoined || joinBtn.classList.contains('joined') + if (joinedLocally) { + joinBtn.textContent = '📋 View Flow' + joinBtn.classList.add('joined') + if (statusEl) statusEl.textContent = '✓ Already joined' + } else { + joinBtn.textContent = '🤝 Join Flow' + joinBtn.classList.remove('joined') + if (statusEl) statusEl.textContent = 'Join your collaborative run' + } + return + } + + if (alreadyJoined) { + declineBtn.style.display = 'none' + if (exists) { + joinBtn.style.display = 'inline-block' + joinBtn.textContent = '📋 View Flow' + joinBtn.classList.add('joined') + importBtn.style.display = 'none' + if (statusEl) statusEl.textContent = '✓ Already joined' + } else { + // Joined session exists, but local flow import is missing. + // Enforce import first to avoid broken "View Flow" state. 
+ importBtn.style.display = 'inline-block' + importBtn.disabled = false + importBtn.textContent = '📥 Import Flow' + joinBtn.style.display = 'none' + joinBtn.classList.add('joined') + if (statusEl) statusEl.textContent = 'Import flow to view joined session' + } + return + } + + // Not joined yet: enforce Import -> Join ordering. + if (exists) { + joinBtn.style.display = 'inline-block' + importBtn.style.display = 'none' + if (statusEl) statusEl.textContent = '✓ Flow available' + } else if (statusEl) { + importBtn.style.display = 'inline-block' + joinBtn.style.display = 'none' + statusEl.textContent = 'Import flow, then join' + } + }) + } + // Timestamp - show on last message of group if (isLast && msg.created_at) { const footer = document.createElement('div') @@ -2588,6 +4072,66 @@ export function createMessagesModule({ }, 50) } + // ============================================================================ + // MULTIPARTY FLOW HELPERS + // ============================================================================ + + function getActiveThreadParticipants() { + if (!activeThreadId) return [] + const normalized = threadParticipantsById.get(activeThreadId) + if (Array.isArray(normalized) && normalized.length > 0) { + return normalized + } + const thread = messageThreads.find((t) => t.thread_id === activeThreadId) + if (!thread) return [] + return thread.participants || [] + } + + function isGroupThread() { + const participants = getActiveThreadParticipants() + const currentUser = getCurrentUserEmail() + const otherParticipants = participants.filter((p) => !emailsMatch(p, currentUser)) + return otherParticipants.length > 1 + } + + async function sendMessageToRecipients({ recipients, body, subject, metadata }) { + if (!recipients || recipients.length === 0) { + throw new Error('No recipients specified') + } + if (!body) { + throw new Error('Message body is required') + } + + const syftboxStatus = getSyftboxStatus() + if (!syftboxStatus.running) { + throw new 
Error('You must be online to send messages') + } + + const request = + recipients.length === 1 + ? { to: recipients[0], subject: subject || NO_SUBJECT_PLACEHOLDER, body, metadata } + : { recipients, subject: subject || NO_SUBJECT_PLACEHOLDER, body, metadata } + + const sent = await invoke('send_message', { request }) + + // Refresh thread list + await loadMessageThreads(false, { emitToasts: false }) + + return sent + } + + function updateProposeFlowButton() { + const btn = document.getElementById('propose-flow-btn') + if (!btn) return + + // Show button only in group chats + if (isGroupThread() && activeThreadId && !isComposingNewMessage) { + btn.style.display = 'flex' + } else { + btn.style.display = 'none' + } + } + // ============================================================================ // PUBLIC API // ============================================================================ @@ -2621,6 +4165,11 @@ export function createMessagesModule({ deleteFailedMessage, // Shared renderer for embedding in other views renderMessagesToContainer, + // Multiparty flow helpers + getActiveThreadParticipants, + isGroupThread, + sendMessageToRecipients, + updateProposeFlowButton, } } diff --git a/src/multiparty-flow-modal.js b/src/multiparty-flow-modal.js new file mode 100644 index 00000000..8483623c --- /dev/null +++ b/src/multiparty-flow-modal.js @@ -0,0 +1,1109 @@ +export function createMultipartyFlowModal({ invoke, dialog }) { + let currentSessionId = null + let currentFlowState = null + let pollInterval = null + + function escapeHtml(text) { + const div = document.createElement('div') + div.textContent = text + return div.innerHTML + } + + function getStatusIcon(status) { + switch (status) { + case 'Pending': + return '⏳' + case 'WaitingForInputs': + return '📥' + case 'Ready': + return '✅' + case 'Running': + return '🔄' + case 'Completed': + return '✓' + case 'Sharing': + return '📤' + case 'Shared': + return '📨' + case 'Failed': + return '❌' + default: + return '❓' + 
} + } + + function getStatusClass(status) { + switch (status) { + case 'Pending': + return 'status-pending' + case 'WaitingForInputs': + return 'status-waiting' + case 'Ready': + return 'status-ready' + case 'Running': + return 'status-running' + case 'Completed': + return 'status-completed' + case 'Sharing': + return 'status-sharing' + case 'Shared': + return 'status-shared' + case 'Failed': + return 'status-failed' + default: + return '' + } + } + + async function openModal(sessionId, flowName, flowSpec, participants) { + currentSessionId = sessionId + + const modal = document.getElementById('multiparty-flow-modal') + if (!modal) { + console.error('Multiparty flow modal not found') + return + } + + const titleEl = modal.querySelector('.multiparty-modal-title') + if (titleEl) { + titleEl.textContent = `🔄 Multiparty Flow: "${flowName}"` + } + + renderParticipants(modal, participants) + await refreshFlowState() + modal.style.display = 'flex' + startPolling() + } + + function closeModal() { + const modal = document.getElementById('multiparty-flow-modal') + if (modal) { + modal.style.display = 'none' + } + stopPolling() + currentSessionId = null + currentFlowState = null + } + + function renderParticipants(modal, participants) { + const container = modal.querySelector('.multiparty-participants-list') + if (!container) return + + container.innerHTML = participants + .map( + (p) => ` +
+ 👤 + ${escapeHtml(p.email)} + (${escapeHtml(p.role)}) +
+ `, + ) + .join('') + } + + function renderSteps(modal, steps, myRole) { + const container = modal.querySelector('.multiparty-steps-list') + if (!container) return + + container.innerHTML = steps + .map( + (step) => ` +
+
+ +
+ ${escapeHtml(step.name)} + + ${getStatusIcon(step.status)} ${step.status} + +
+
+
${escapeHtml(step.description)}
+ ${renderStepActions(step)} +
+ `, + ) + .join('') + } + + function renderStepActions(step) { + if (!step.my_action) { + return '
Waiting for other participant
' + } + + const actions = [] + + if (step.status === 'Ready' || step.status === 'Pending') { + actions.push( + ``, + ) + } + + if (step.status === 'Running') { + actions.push('Running...') + } + + if (step.status === 'Completed' && step.shares_output && !step.outputs_shared) { + actions.push( + ``, + ) + actions.push( + ``, + ) + } + + if (step.status === 'Completed' && !step.shares_output) { + actions.push('✓ Complete') + } + + if (step.status === 'Shared') { + actions.push('📨 Outputs Shared') + } + + if (step.status === 'WaitingForInputs') { + actions.push( + 'Waiting for inputs from other participants...', + ) + } + + return `
${actions.join('')}
` + } + + async function refreshFlowState() { + if (!currentSessionId) return + + try { + const state = await invoke('get_multiparty_flow_state', { sessionId: currentSessionId }) + if (state) { + currentFlowState = state + const modal = document.getElementById('multiparty-flow-modal') + if (modal) { + renderSteps(modal, state.steps, state.my_role) + } + } + } catch (error) { + console.error('Failed to refresh flow state:', error) + } + } + + async function toggleAutoRun(stepId, autoRun) { + if (!currentSessionId) return + + try { + await invoke('set_step_auto_run', { + sessionId: currentSessionId, + stepId, + autoRun, + }) + await refreshFlowState() + } catch (error) { + console.error('Failed to toggle auto-run:', error) + } + } + + async function runStep(stepId) { + if (!currentSessionId) return + + try { + await invoke('run_flow_step', { + sessionId: currentSessionId, + stepId, + }) + await refreshFlowState() + } catch (error) { + console.error('Failed to run step:', error) + if (dialog?.message) { + await dialog.message(`Failed to run step: ${error}`, { title: 'Error', kind: 'error' }) + } + } + } + + async function previewOutputs(stepId) { + if (!currentSessionId) return + + try { + const files = await invoke('get_step_output_files', { + sessionId: currentSessionId, + stepId, + }) + + if (files && files.length > 0) { + const fileList = files.map((f) => `• ${f}`).join('\n') + if (dialog?.message) { + await dialog.message(`Files to be shared:\n\n${fileList}`, { + title: 'Output Files', + kind: 'info', + }) + } + } else { + if (dialog?.message) { + await dialog.message('No output files found', { title: 'Preview', kind: 'info' }) + } + } + } catch (error) { + console.error('Failed to preview outputs:', error) + } + } + + async function shareOutputs(stepId) { + if (!currentSessionId) return + + const confirmed = await dialog?.ask( + 'Are you sure you want to share these outputs with other participants?', + { + title: 'Share Outputs', + kind: 'warning', + okLabel: 
'Share', + cancelLabel: 'Cancel', + }, + ) + + if (!confirmed) return + + try { + await invoke('share_step_outputs', { + sessionId: currentSessionId, + stepId, + }) + await refreshFlowState() + } catch (error) { + console.error('Failed to share outputs:', error) + if (dialog?.message) { + await dialog.message(`Failed to share outputs: ${error}`, { title: 'Error', kind: 'error' }) + } + } + } + + async function acceptInvitation(sessionId, flowName, flowSpec, participants, autoRunAll = false) { + try { + const state = await invoke('accept_flow_invitation', { + sessionId, + flowName, + flowSpec, + participants, + autoRunAll, + }) + openModal(sessionId, flowName, flowSpec, participants) + return state + } catch (error) { + console.error('Failed to accept invitation:', error) + throw error + } + } + + function startPolling() { + stopPolling() + pollInterval = setInterval(refreshFlowState, 2000) + } + + function stopPolling() { + if (pollInterval) { + clearInterval(pollInterval) + pollInterval = null + } + } + + window.multipartyFlowModal = { + openModal, + closeModal, + toggleAutoRun, + runStep, + previewOutputs, + shareOutputs, + acceptInvitation, + refreshFlowState, + } + + return { + openModal, + closeModal, + acceptInvitation, + refreshFlowState, + } +} + +export function createProposeFlowModal({ + invoke, + dialog, + getCurrentUserEmail, + getThreadParticipants, + sendMessage, +}) { + let selectedFlow = null + let flowRoles = [] + let roleAssignments = {} + let flowRoleDefaults = {} + + function escapeHtml(text) { + const div = document.createElement('div') + div.textContent = text + return div.innerHTML + } + + function isMultipartyFlow(flow) { + const spec = flow?.spec || flow || {} + if (spec.multiparty === true || flow?.multiparty === true) return true + if (Array.isArray(spec.roles) && spec.roles.length > 1) return true + if (Array.isArray(flow?.roles) && flow.roles.length > 1) return true + if (spec?.inputs?.datasites) return true + if 
(Array.isArray(spec?.datasites?.all) && spec.datasites.all.length > 1) return true + if (Array.isArray(spec?.datasites) && spec.datasites.length > 1) return true + return false + } + + function getDefaultDatasitesFromFlow(flow) { + const spec = flow?.spec || flow || {} + if (Array.isArray(spec?.inputs?.datasites?.default)) { + return spec.inputs.datasites.default.filter(Boolean) + } + if (Array.isArray(spec?.datasites?.all)) { + return spec.datasites.all.filter(Boolean) + } + if (Array.isArray(spec?.datasites)) { + return spec.datasites.filter(Boolean) + } + return [] + } + + function resolveDefaultFromIncludeToken(token, defaultDatasites) { + const trimmed = String(token || '').trim() + if (!trimmed) return '' + if (trimmed.includes('@')) return trimmed + if (trimmed.startsWith('{datasites[') && trimmed.endsWith(']}')) { + const idxStr = trimmed.slice('{datasites['.length, -2) + const idx = Number.parseInt(idxStr, 10) + if (Number.isFinite(idx) && idx >= 0 && idx < defaultDatasites.length) { + return defaultDatasites[idx] || '' + } + } + return '' + } + + function collectStepTargetTokens(spec) { + const steps = Array.isArray(spec?.steps) ? spec.steps : [] + const tokens = [] + for (const step of steps) { + const runTargets = step?.run?.targets ?? 
step?.runs_on + if (Array.isArray(runTargets)) { + runTargets.forEach((t) => tokens.push(String(t || '').trim())) + } else if (typeof runTargets === 'string') { + tokens.push(runTargets.trim()) + } + const barrierTargets = step?.barrier?.targets + if (Array.isArray(barrierTargets)) { + barrierTargets.forEach((t) => tokens.push(String(t || '').trim())) + } else if (typeof barrierTargets === 'string') { + tokens.push(barrierTargets.trim()) + } + } + return tokens.filter(Boolean) + } + + function normalizeRoleFromTargetToken(token) { + const t = String(token || '').trim().toLowerCase() + if (!t) return null + if (t === 'all' || t === '*' || t === '{datasites[*]}' || t === '{datasite.current}') return null + if (t.startsWith('{groups.') && t.endsWith('}')) { + const name = t.slice('{groups.'.length, -1).trim() + if (!name) return null + if (name === 'contributor' || name === 'contributors') return 'clients' + return name + } + if (t.includes('@')) { + const local = t.split('@')[0] || '' + if (/^aggregator\d*$/.test(local)) return 'aggregator' + if (/^(client|contributor)\d+$/.test(local)) return 'clients' + if (local === 'client' || local === 'clients' || local === 'contributor' || local === 'contributors') return 'clients' + return local || null + } + if (t === 'contributor' || t === 'contributors') return 'clients' + return t + } + + function inferRolesFromStepTargets(spec, defaultDatasites) { + const tokens = collectStepTargetTokens(spec) + if (tokens.length === 0) return null + + const order = [] + const counts = {} + const explicitRoleTargets = {} + for (const token of tokens) { + const role = normalizeRoleFromTargetToken(token) + if (!role) continue + if (!order.includes(role)) order.push(role) + if (counts[role] == null) counts[role] = 0 + if (!explicitRoleTargets[role]) explicitRoleTargets[role] = new Set() + + if (token.includes('@')) { + const local = token.split('@')[0].toLowerCase() + if (/^(client|contributor)\d+$/.test(local)) { + // Count explicit 
numbered targets only once per unique label. + explicitRoleTargets[role].add(local) + counts[role] = explicitRoleTargets[role].size + } else if (counts[role] === 0) { + counts[role] = 1 + } + } else if (counts[role] === 0) { + counts[role] = 1 + } + } + + if (order.length === 0) return null + + let totalSlots = Object.values(counts).reduce((sum, n) => sum + (n || 0), 0) + if (defaultDatasites.length > totalSlots) { + const remainder = defaultDatasites.length - totalSlots + const expandable = + order.find((r) => r === 'clients') || + order.find((r) => r.endsWith('s')) || + order[0] + counts[expandable] = (counts[expandable] || 0) + remainder + totalSlots += remainder + } + + // Preserve aggregator visibility when present. + if (order.includes('aggregator')) { + const withoutAggregator = order.filter((r) => r !== 'aggregator') + order.splice(0, order.length, 'aggregator', ...withoutAggregator) + } + + // Never infer more slots than default datasites when defaults are available. + // This prevents repeated step targets from inflating role rows (e.g. clients 1..6). + const maxSlots = defaultDatasites.length > 0 ? defaultDatasites.length : Number.MAX_SAFE_INTEGER + + const roles = [] + for (const role of order) { + const count = Math.max(1, counts[role] || 0) + for (let i = 0; i < count; i += 1) { + if (roles.length >= maxSlots) break + const roleId = count > 1 ? `${role}_${i + 1}` : role + roles.push({ + id: roleId, + role, + label: count > 1 ? `${role} ${i + 1}` : role, + }) + } + if (roles.length >= maxSlots) break + } + return roles.length > 0 ? roles : null + } + + function inferFlowRoles(flow) { + const spec = flow?.spec || flow || {} + const roles = Array.isArray(spec?.roles) ? 
spec.roles : [] + if (roles.length > 0) { + return { roles, defaults: {} } + } + + const defaultDatasites = getDefaultDatasitesFromFlow(flow) + const groups = spec?.datasites?.groups + if (defaultDatasites.length > 0) { + // If groups are not preserved in the loaded flow spec, infer semantic roles + // from step targets (e.g. clients/aggregator) before falling back to generic + // participant slots. + const hasGroups = + !!groups && typeof groups === 'object' && Object.keys(groups).length > 0 + if (!hasGroups) { + const rolesFromTargets = inferRolesFromStepTargets(spec, defaultDatasites) + if (rolesFromTargets && rolesFromTargets.length > 0) { + const selectedRoles = rolesFromTargets.slice(0, defaultDatasites.length) + const defaults = {} + for (let i = 0; i < Math.min(defaultDatasites.length, selectedRoles.length); i += 1) { + defaults[selectedRoles[i].id] = defaultDatasites[i] + } + return { roles: selectedRoles, defaults } + } + } + + const inferredRoles = [] + const defaults = {} + const roleCounts = {} + + for (let i = 0; i < defaultDatasites.length; i += 1) { + const email = defaultDatasites[i] + let roleName = 'participant' + + if (groups && typeof groups === 'object') { + for (const [groupName, groupDef] of Object.entries(groups)) { + const include = Array.isArray(groupDef?.include) ? groupDef.include : [] + for (const token of include) { + const resolved = resolveDefaultFromIncludeToken(token, defaultDatasites) + if (resolved && resolved.toLowerCase() === String(email).toLowerCase()) { + roleName = groupName + break + } + } + if (roleName !== 'participant') break + } + } + + roleCounts[roleName] = (roleCounts[roleName] || 0) + 1 + const countForRole = roleCounts[roleName] + const roleId = countForRole > 1 ? `${roleName}_${countForRole}` : roleName + defaults[roleId] = email + inferredRoles.push({ + id: roleId, + role: roleName, + label: countForRole > 1 ? 
`${roleName} ${countForRole}` : roleName, + description: `Default: ${email}`, + }) + } + + return { roles: inferredRoles, defaults } + } + if (groups && typeof groups === 'object') { + const inferredRoles = [] + const defaults = {} + + for (const [groupName, groupDef] of Object.entries(groups)) { + const include = Array.isArray(groupDef?.include) ? groupDef.include : [] + const slotCount = Math.max(include.length, 1) + + for (let i = 0; i < slotCount; i += 1) { + const roleId = slotCount > 1 ? `${groupName}_${i + 1}` : groupName + const defaultEmail = resolveDefaultFromIncludeToken(include[i], defaultDatasites) + if (defaultEmail) defaults[roleId] = defaultEmail + inferredRoles.push({ + id: roleId, + role: groupName, + label: slotCount > 1 ? `${groupName} ${i + 1}` : groupName, + description: defaultEmail ? `Default: ${defaultEmail}` : '', + }) + } + } + + if (inferredRoles.length > 0) { + return { roles: inferredRoles, defaults } + } + } + + const rolesFromTargets = inferRolesFromStepTargets(spec, defaultDatasites) + if (rolesFromTargets && rolesFromTargets.length > 0) { + const defaults = {} + for (let i = 0; i < Math.min(defaultDatasites.length, rolesFromTargets.length); i += 1) { + defaults[rolesFromTargets[i].id] = defaultDatasites[i] + } + return { roles: rolesFromTargets, defaults } + } + + const defaults = {} + const datasites = defaultDatasites + const participantCount = Math.max(datasites.length, 2) + const inferredRoles = Array.from({ length: participantCount }, (_, index) => { + const roleId = `participant${index + 1}` + if (datasites[index]) defaults[roleId] = datasites[index] + return { + id: roleId, + description: datasites[index] ? 
`Default: ${datasites[index]}` : '', + } + }) + return { roles: inferredRoles, defaults } + } + + async function open() { + const modal = document.getElementById('propose-flow-modal') + if (!modal) { + console.error('Propose flow modal not found') + return + } + + // Reset state + selectedFlow = null + flowRoles = [] + roleAssignments = {} + flowRoleDefaults = {} + + // Load multiparty flows + await loadMultipartyFlows() + + // Show modal + modal.style.display = 'flex' + } + + function close() { + const modal = document.getElementById('propose-flow-modal') + if (modal) { + modal.style.display = 'none' + } + selectedFlow = null + flowRoles = [] + roleAssignments = {} + flowRoleDefaults = {} + } + + async function loadMultipartyFlows() { + const select = document.getElementById('propose-flow-select') + if (!select) return + + try { + // Get local flows (use get_flows command, not list_flows) + const flows = await invoke('get_flows') + console.log('[ProposeFlow] Loaded flows:', flows) + console.log('[ProposeFlow] Flow count:', flows?.length || 0) + + // Debug: log each flow's structure + ;(flows || []).forEach((f, i) => { + console.log(`[ProposeFlow] Flow ${i}:`, { + name: f.metadata?.name || f.name, + hasSpec: !!f.spec, + specKeys: f.spec ? 
Object.keys(f.spec) : [], + specFull: f.spec, + multiparty: f.spec?.multiparty, + roles: f.spec?.roles, + topLevelKeys: Object.keys(f), + }) + }) + + // Filter for multiparty flows - check various possible structures + const multipartyFlows = (flows || []).filter((f) => isMultipartyFlow(f)) + + console.log('[ProposeFlow] Multiparty flows:', multipartyFlows) + + select.innerHTML = '' + + if (multipartyFlows.length === 0) { + // No multiparty flows found - show helpful option + const noFlowsOption = document.createElement('option') + noFlowsOption.value = '__no_flows__' + noFlowsOption.textContent = 'No multiparty flows found - click to import' + select.appendChild(noFlowsOption) + } else { + for (const flow of multipartyFlows) { + const name = flow.metadata?.name || flow.name || 'Unknown' + const option = document.createElement('option') + option.value = name + option.textContent = name + option.dataset.flowSpec = JSON.stringify(flow) + select.appendChild(option) + } + } + + // Add event listener for flow selection + select.onchange = () => handleFlowSelection(select) + } catch (error) { + console.error('Failed to load flows:', error) + // Show option to go to Flows page + select.innerHTML = '' + const errorOption = document.createElement('option') + errorOption.value = '__go_to_flows__' + errorOption.textContent = 'Go to Flows to import one →' + select.appendChild(errorOption) + } + } + + function handleFlowSelection(select) { + const selectedOption = select.options[select.selectedIndex] + const rolesSection = document.getElementById('propose-flow-roles-section') + const messageSection = document.getElementById('propose-flow-message-section') + const sendBtn = document.getElementById('propose-flow-send-btn') + + // Handle special navigation options + if (selectedOption?.value === '__no_flows__' || selectedOption?.value === '__go_to_flows__') { + close() + // Navigate to Flows tab + const event = new CustomEvent('navigate-to-tab', { detail: { tab: 'flows' } }) + 
window.dispatchEvent(event) + return + } + + if (!selectedOption?.value || !selectedOption.dataset.flowSpec) { + selectedFlow = null + flowRoles = [] + flowRoleDefaults = {} + roleAssignments = {} + if (rolesSection) rolesSection.style.display = 'none' + if (messageSection) messageSection.style.display = 'none' + if (sendBtn) sendBtn.disabled = true + return + } + + try { + selectedFlow = JSON.parse(selectedOption.dataset.flowSpec) + const inferred = inferFlowRoles(selectedFlow) + flowRoles = inferred.roles + flowRoleDefaults = inferred.defaults + roleAssignments = {} + renderRoleAssignments() + if (rolesSection) rolesSection.style.display = 'block' + if (messageSection) messageSection.style.display = 'block' + updateSendButton() + } catch (error) { + console.error('Failed to parse flow spec:', error) + } + } + + function renderRoleAssignments() { + const container = document.getElementById('propose-flow-roles-list') + if (!container) return + + // Get current user and thread participants + const currentUser = getCurrentUserEmail ? getCurrentUserEmail() : '' + const participants = getThreadParticipants ? getThreadParticipants() : [] + // Build list of available contacts with stable de-duplication. + // Use live identities only (current user + thread participants). + // Do not filter role-like sandbox emails here because they are valid identities + // in local/dev scenarios (e.g., client1@sandbox.local, aggregator@sandbox.local). 
+ const contacts = [] + const seenEmails = new Set() + const addContact = (email, isMe = false) => { + const normalized = String(email || '').trim() + if (!normalized) return + const key = normalized.toLowerCase() + if (seenEmails.has(key)) { + if (isMe) { + const existing = contacts.find((c) => c.email.toLowerCase() === key) + if (existing) existing.isMe = true + } + return + } + seenEmails.add(key) + contacts.push({ email: normalized, isMe }) + } + + addContact(currentUser, true) + for (const p of participants) addContact(p, String(p || '').trim() === currentUser) + + const usedAutoAssignments = new Set() + + container.innerHTML = flowRoles + .map((role, idx) => { + const roleId = role.id || role + const roleLabel = role.label || roleId + + // Auto-assign if possible + let defaultValue = '' + const preferred = flowRoleDefaults[roleId] || '' + const preferredInContacts = preferred + ? contacts.find((c) => c.email.toLowerCase() === preferred.toLowerCase()) + : null + if ( + preferredInContacts && + !usedAutoAssignments.has(preferredInContacts.email.toLowerCase()) + ) { + defaultValue = preferredInContacts.email + } else { + const fallback = contacts.find( + (c) => !usedAutoAssignments.has(c.email.toLowerCase()), + ) + if (fallback) defaultValue = fallback.email + } + if (defaultValue) { + roleAssignments[roleId] = defaultValue + usedAutoAssignments.add(defaultValue.toLowerCase()) + } + + const optionsHtml = contacts + .map( + (c) => + ``, + ) + .join('') + + return ` +
+
${escapeHtml(roleLabel)}
+ + +
+ ` + }) + .join('') + } + + function updateRoleAssignment(roleId, email) { + if (email) { + roleAssignments[roleId] = email + } else { + delete roleAssignments[roleId] + } + updateSendButton() + } + + function syncRoleAssignmentsFromDom() { + const container = document.getElementById('propose-flow-roles-list') + if (!container) return + const selects = container.querySelectorAll('.propose-flow-role-select') + for (const select of selects) { + const roleId = select.getAttribute('data-role') + if (!roleId) continue + const value = String(select.value || '').trim() + if (value) { + roleAssignments[roleId] = value + } else { + delete roleAssignments[roleId] + } + } + } + + function updateSendButton() { + const sendBtn = document.getElementById('propose-flow-send-btn') + if (!sendBtn) return + syncRoleAssignmentsFromDom() + if (!selectedFlow) { + const flowSelect = document.getElementById('propose-flow-select') + const selectedIndex = typeof flowSelect?.selectedIndex === 'number' ? flowSelect.selectedIndex : -1 + const selectedOption = + selectedIndex >= 0 && flowSelect?.options ? 
flowSelect.options[selectedIndex] : null + const serialized = selectedOption?.dataset?.flowSpec + if (serialized) { + try { + selectedFlow = JSON.parse(serialized) + } catch (error) { + console.warn('Failed to rehydrate selected flow from dropdown:', error) + } + } + } + + // Enable if all roles are assigned + const allRolesAssigned = flowRoles.every((role) => { + const roleId = role.id || role + return roleAssignments[roleId] + }) + + sendBtn.disabled = !selectedFlow || !allRolesAssigned + } + + function collectParticipantsFromDom() { + const container = document.getElementById('propose-flow-roles-list') + const participants = [] + for (const role of flowRoles) { + const roleId = role.id || role + const roleName = role.role || roleId + let email = String(roleAssignments[roleId] || '').trim() + if (!email && container) { + const selects = container.querySelectorAll('.propose-flow-role-select') + const select = Array.from(selects).find( + (el) => String(el.getAttribute('data-role') || '') === String(roleId), + ) + email = String(select?.value || '').trim() + } + participants.push({ + email, + role: roleName, + role_id: roleId, + }) + } + return participants + } + + function remapFlowSpecForParticipants(flow, participants) { + // Clone so the original flow object in the modal state remains untouched. 
+ const flowCopy = JSON.parse(JSON.stringify(flow)) + const specRoot = flowCopy?.spec || flowCopy + if (!specRoot || typeof specRoot !== 'object') return flowCopy + + const roleToEmail = new Map() + const roleIdToEmail = new Map() + const orderedEmails = [] + for (const p of participants) { + const email = String(p?.email || '').trim() + if (!email) continue + if (!orderedEmails.includes(email)) orderedEmails.push(email) + if (p?.role) { + const key = String(p.role).trim().toLowerCase() + if (key && !roleToEmail.has(key)) roleToEmail.set(key, email) + } + if (p?.role_id) { + const key = String(p.role_id).trim().toLowerCase() + if (key && !roleIdToEmail.has(key)) roleIdToEmail.set(key, email) + } + } + + const resolveToken = (token) => { + const trimmed = String(token || '').trim() + if (!trimmed) return '' + if (trimmed.startsWith('{datasites[') && trimmed.endsWith(']}')) { + const idxStr = trimmed.slice('{datasites['.length, -2) + const idx = Number.parseInt(idxStr, 10) + if (Number.isFinite(idx) && idx >= 0 && idx < orderedEmails.length) { + return orderedEmails[idx] + } + } + if (trimmed.includes('@')) return trimmed + const lookup = trimmed.toLowerCase() + if (roleIdToEmail.has(lookup)) return roleIdToEmail.get(lookup) || '' + if (roleToEmail.has(lookup)) return roleToEmail.get(lookup) || '' + if ((lookup === 'client' || lookup === 'contributor') && roleToEmail.has('clients')) { + return roleToEmail.get('clients') || '' + } + return '' + } + + if (specRoot.datasites && typeof specRoot.datasites === 'object') { + specRoot.datasites.all = [...orderedEmails] + if (specRoot.datasites.groups && typeof specRoot.datasites.groups === 'object') { + for (const [groupName, groupDef] of Object.entries(specRoot.datasites.groups)) { + const include = Array.isArray(groupDef?.include) ? 
groupDef.include : [] + const remapped = [] + for (const item of include) { + const token = String(item || '').trim() + if (!token) continue + if (token === '{datasites[*]}' || token.toLowerCase() === 'all') { + remapped.push(...orderedEmails) + continue + } + const mapped = resolveToken(token) + if (mapped) remapped.push(mapped) + } + if (remapped.length > 0) { + groupDef.include = [...new Set(remapped)] + } + } + } + } + + if (specRoot.inputs?.datasites && typeof specRoot.inputs.datasites === 'object') { + specRoot.inputs.datasites.default = [...orderedEmails] + } + + return flowCopy + } + + async function sendInvitation() { + if (!selectedFlow) { + console.error('No flow selected') + return + } + syncRoleAssignmentsFromDom() + + const participants = collectParticipantsFromDom() + const remappedFlowSpec = remapFlowSpecForParticipants(selectedFlow, participants) + const flowName = remappedFlowSpec?.metadata?.name || remappedFlowSpec?.name || 'multiparty' + + const missingRoles = participants.filter((p) => !p.email) + if (missingRoles.length > 0) { + if (dialog?.message) { + await dialog.message('Please assign an email for every role before sending.', { + title: 'Missing Role Assignments', + kind: 'warning', + }) + } + return + } + + const roleIdsByEmail = new Map() + for (const p of participants) { + const list = roleIdsByEmail.get(p.email) || [] + list.push(p.role_id) + roleIdsByEmail.set(p.email, list) + } + const duplicateAssignments = Array.from(roleIdsByEmail.entries()).filter( + ([, roles]) => roles.length > 1, + ) + if (duplicateAssignments.length > 0) { + const details = duplicateAssignments + .map(([email, roles]) => `${email} (${roles.join(', ')})`) + .join('\n') + if (dialog?.message) { + await dialog.message( + `Each role must be assigned to a different participant.\n\nDuplicate assignments:\n${details}`, + { + title: 'Duplicate Role Assignments', + kind: 'warning', + }, + ) + } + return + } + + // Get message text + const messageInput = 
document.getElementById('propose-flow-message') + const customMessage = messageInput?.value?.trim() || '' + const body = customMessage || `Let's run the "${flowName}" multiparty flow together!` + + // Get recipients (all participants except current user) + const currentUser = getCurrentUserEmail ? getCurrentUserEmail() : '' + const recipients = Array.from( + new Set(participants.filter((p) => p.email !== currentUser).map((p) => p.email)), + ) + + if (recipients.length === 0) { + if (dialog?.message) { + await dialog.message('Please assign at least one other participant to a role.', { + title: 'No Recipients', + kind: 'warning', + }) + } + return + } + + try { + // Get thread ID from messages module + const threadId = window.messagesModule?.activeThreadId || `thread-${Date.now()}` + + // First, set up the proposer's session on the backend - this returns the session ID + const sessionId = await invoke('send_flow_invitation', { + threadId, + flowName, + flowSpec: remappedFlowSpec, + participantRoles: participants, + }) + + // Send message with flow invitation metadata using the backend's session ID + await sendMessage({ + recipients, + body, + subject: `Multiparty Flow: ${flowName}`, + metadata: { + flow_invitation: { + flow_name: flowName, + session_id: sessionId, + proposed_by: currentUser || null, + flow_location: + currentUser && sessionId + ? `syft://${currentUser}/shared/flows/${flowName}/${sessionId}/_flow_source` + : null, + participants, + flow_spec: remappedFlowSpec, + }, + }, + }) + + // Close modal + close() + + // Show success and navigate to Runs + if (dialog?.message) { + await dialog.message( + `Flow invitation sent to ${recipients.join(', ')}. 
Navigate to Runs to manage the flow.`, + { + title: 'Invitation Sent', + kind: 'info', + }, + ) + } + } catch (error) { + console.error('Failed to send invitation:', error) + if (dialog?.message) { + await dialog.message(`Failed to send invitation: ${error}`, { + title: 'Error', + kind: 'error', + }) + } + } + } + + window.proposeFlowModal = { + open, + close, + updateRoleAssignment, + sendInvitation, + } + + return { + open, + close, + } +} diff --git a/src/runs.js b/src/runs.js index 01d382bf..a7e221f5 100644 --- a/src/runs.js +++ b/src/runs.js @@ -15,6 +15,282 @@ export function createRunsModule({ invoke, listen, dialog, refreshLogs = () => { const flowNfCommandCache = new Map() const flowTimerIntervals = new Map() let flowReconcileInFlight = false + const multipartyPollingIntervals = new Map() + const multipartyActiveTabs = new Map() + const multipartyLastSyncAt = new Map() + const multipartyStepExpanded = new Map() + const multipartyCodeExpanded = new Map() + const multipartyLogExpanded = new Map() + const multipartyStepLogIntervals = new Map() + const multipartyStepLogCache = new Map() + const multipartyStepTimerIntervals = new Map() + const multipartyRenderedHtml = new Map() + const multipartyRenderKeys = new Map() + const multipartyStepTimers = new Map() + const multipartyParticipantStatusMemory = new Map() + const multipartyDiagnosticsSamples = new Map() + const multipartyLoadInFlight = new Map() + let runsRenderKey = '' + + function getNestedState(map, sessionId) { + let nested = map.get(sessionId) + if (!nested) { + nested = new Map() + map.set(sessionId, nested) + } + return nested + } + + // Start polling for multiparty state updates + function startMultipartyPolling(sessionId, runId) { + stopMultipartyPolling(sessionId) + // Poll every 3 seconds for state updates from other participants + const interval = setInterval(async () => { + if (multipartyLoadInFlight.get(sessionId)) return + multipartyLoadInFlight.set(sessionId, true) + try { + await 
loadMultipartySteps(sessionId, runId) + } catch (err) { + console.warn('Multiparty polling error:', err) + } finally { + multipartyLoadInFlight.set(sessionId, false) + } + }, 3000) + multipartyPollingIntervals.set(sessionId, interval) + const timerInterval = setInterval(() => { + refreshStepTimerNodes(sessionId) + }, 1000) + multipartyStepTimerIntervals.set(sessionId, timerInterval) + } + + function stopMultipartyPolling(sessionId) { + const interval = multipartyPollingIntervals.get(sessionId) + if (interval) { + clearInterval(interval) + multipartyPollingIntervals.delete(sessionId) + } + const timerInterval = multipartyStepTimerIntervals.get(sessionId) + if (timerInterval) { + clearInterval(timerInterval) + multipartyStepTimerIntervals.delete(sessionId) + } + stopAllStepLogPolling(sessionId) + multipartyLoadInFlight.delete(sessionId) + for (const key of multipartyDiagnosticsSamples.keys()) { + if (key.startsWith(`${sessionId}::`)) { + multipartyDiagnosticsSamples.delete(key) + } + } + } + + function stopAllMultipartyPolling() { + multipartyPollingIntervals.forEach((_, sessionId) => stopMultipartyPolling(sessionId)) + multipartyStepLogIntervals.forEach((interval) => clearInterval(interval)) + multipartyStepLogIntervals.clear() + multipartyStepTimerIntervals.forEach((interval) => clearInterval(interval)) + multipartyStepTimerIntervals.clear() + multipartyRenderedHtml.clear() + multipartyRenderKeys.clear() + multipartyStepTimers.clear() + multipartyParticipantStatusMemory.clear() + multipartyDiagnosticsSamples.clear() + multipartyLoadInFlight.clear() + } + + function getStepLogKey(sessionId, stepId) { + return `${sessionId}::${stepId}` + } + + function getStepTimerKey(sessionId, stepId) { + return `${sessionId}::${stepId}` + } + + async function showDockerWarningModal(runAction) { + return new Promise((resolve) => { + const existing = document.getElementById('docker-warning-modal') + if (existing) existing.remove() + + const overlay = document.createElement('div') + 
overlay.id = 'docker-warning-modal' + overlay.style.cssText = + 'position: fixed; inset: 0; background: rgba(15,23,42,0.45); display: flex; align-items: center; justify-content: center; z-index: 9999;' + + const modal = document.createElement('div') + modal.style.cssText = + 'background: #ffffff; color: #0f172a; width: min(460px, 92vw); border-radius: 14px; box-shadow: 0 18px 50px rgba(0,0,0,0.25); padding: 22px 24px; display: flex; flex-direction: column; gap: 14px;' + + modal.innerHTML = ` +
+
⚠️
+
+
Docker isn’t running
+
+ Start Docker Desktop, then re-check. You can also choose to run anyway (it may fail). +
+
+
+
+
+ + + +
+ ` + + const statusEl = modal.querySelector('#docker-check-status') + + function close() { + overlay.remove() + } + + modal.querySelector('#docker-cancel').addEventListener('click', () => { + close() + resolve(false) + }) + + modal.querySelector('#docker-run-anyway').addEventListener('click', async () => { + close() + await runAction() + resolve(true) + }) + + modal.querySelector('#docker-recheck').addEventListener('click', async () => { + statusEl.textContent = 'Checking Docker...' + statusEl.style.color = '#0f172a' + try { + const running = await invoke('check_docker_running') + if (running) { + statusEl.textContent = 'Docker is running! Running step...' + statusEl.style.color = '#15803d' + close() + await runAction() + resolve(true) + } else { + statusEl.textContent = 'Still not running. Please start Docker then click re-check.' + statusEl.style.color = '#b91c1c' + } + } catch (err) { + console.error('Docker re-check failed:', err) + statusEl.textContent = 'Could not check Docker (see console).' 
+ statusEl.style.color = '#b91c1c' + } + }) + + overlay.addEventListener('click', (e) => { + if (e.target === overlay) { + close() + resolve(false) + } + }) + + overlay.appendChild(modal) + document.body.appendChild(overlay) + }) + } + + function formatClockDuration(ms) { + const totalSeconds = Math.max(0, Math.floor(ms / 1000)) + const hours = Math.floor(totalSeconds / 3600) + const minutes = Math.floor((totalSeconds % 3600) / 60) + const seconds = totalSeconds % 60 + if (hours > 0) { + return `${hours}:${String(minutes).padStart(2, '0')}:${String(seconds).padStart(2, '0')}` + } + return `${minutes}:${String(seconds).padStart(2, '0')}` + } + + function updateStepTimer(sessionId, stepId, status) { + const key = getStepTimerKey(sessionId, stepId) + const now = Date.now() + const running = status === 'Running' + const done = status === 'Completed' || status === 'Shared' || status === 'Failed' + const reset = status === 'Pending' || status === 'Ready' || status === 'WaitingForInputs' + const existing = multipartyStepTimers.get(key) + + if (!existing) { + if (running || done) { + multipartyStepTimers.set(key, { + startedAt: now, + stoppedAt: done ? now : null, + }) + } + return + } + + if (running) { + // Keep completed timers frozen if a stale refresh briefly reports Running again. + if (existing.stoppedAt) { + return + } + if (!existing.startedAt) { + existing.startedAt = now + } + existing.stoppedAt = null + return + } + + if (done) { + if (!existing.startedAt) { + existing.startedAt = now + } + if (!existing.stoppedAt) { + existing.stoppedAt = now + } + return + } + + if (reset) { + // Preserve frozen durations for already-completed steps even if a stale + // status refresh briefly reports Pending/Ready. 
+ if (existing.stoppedAt) { + return + } + multipartyStepTimers.delete(key) + } + } + + function getStepTimerLabel(sessionId, stepId) { + const timer = multipartyStepTimers.get(getStepTimerKey(sessionId, stepId)) + if (!timer?.startedAt) return '' + const end = timer.stoppedAt || Date.now() + return formatClockDuration(end - timer.startedAt) + } + + function refreshStepTimerNodes(sessionId) { + const nodes = document.querySelectorAll( + `.mp-step-timer[data-session-id="${sessionId}"][data-step-id]`, + ) + nodes.forEach((node) => { + const stepId = node.dataset.stepId + if (!stepId) return + const label = getStepTimerLabel(sessionId, stepId) + node.textContent = label ? `⏱ ${label}` : '' + }) + } + + function stopStepLogPolling(sessionId, stepId) { + const key = getStepLogKey(sessionId, stepId) + const interval = multipartyStepLogIntervals.get(key) + if (interval) { + clearInterval(interval) + multipartyStepLogIntervals.delete(key) + } + } + + function stopAllStepLogPolling(sessionId) { + const prefix = `${sessionId}::` + for (const [key, interval] of multipartyStepLogIntervals.entries()) { + if (key.startsWith(prefix)) { + clearInterval(interval) + multipartyStepLogIntervals.delete(key) + } + } + for (const key of multipartyStepLogCache.keys()) { + if (key.startsWith(prefix)) { + multipartyStepLogCache.delete(key) + } + } + } function parseConcurrencyInput(value) { if (value === null || value === undefined) return null @@ -128,7 +404,7 @@ export function createRunsModule({ invoke, listen, dialog, refreshLogs = () => { Math.round(avgPerUnit * (progress.total - progress.completed)), ) const cappedMs = Math.min(remainingMs, 24 * 60 * 60 * 1000) - eta.textContent = `ETA ~ ${formatDuration(cappedMs)}` + eta.textContent = `ETA ~ ${formatEtaDuration(cappedMs)}` } else { eta.textContent = '' } @@ -140,7 +416,7 @@ export function createRunsModule({ invoke, listen, dialog, refreshLogs = () => { }) } - function formatDuration(ms) { + function formatEtaDuration(ms) { if 
(!Number.isFinite(ms) || ms <= 0) return '—' const totalSeconds = Math.round(ms / 1000) const minutes = Math.floor(totalSeconds / 60) @@ -388,8 +664,9 @@ export function createRunsModule({ invoke, listen, dialog, refreshLogs = () => { flowNfCommandCache.set(run.id, nfCmd) populateNfCommand(run.id, nfCmd) // Persist to state file so it survives re-renders - if (progress && progress.total) { - saveFlowState(run.id, progress, null, 0, nfCmd) + const cachedProgress = flowProgressCache.get(run.id) + if (cachedProgress && cachedProgress.total) { + saveFlowState(run.id, cachedProgress, null, 0, nfCmd) } } } @@ -581,6 +858,1445 @@ export function createRunsModule({ invoke, listen, dialog, refreshLogs = () => { updateRunButton() } + // Render multiparty details section for a run + function renderMultipartyDetails(runMetadata, runId) { + const { session_id, my_role, participants } = runMetadata + if (!session_id || !participants) return '' + const inputOverrides = runMetadata?.input_overrides || {} + const inputEntries = Object.entries(inputOverrides).filter(([key]) => key.startsWith('inputs.')) + + const participantsHtml = participants + .map((p) => { + const isMe = p.role === my_role + return ` + ${escapeHtml(p.role)} + ${escapeHtml(p.email)}${isMe ? ' (you)' : ''} + ` + }) + .join('') + + const inputsHtml = + inputEntries.length > 0 + ? ` +
+ + +
` + : '' + + return ` +
+
+
👥 Participants
+
${participantsHtml}
+
+ ${inputsHtml} +
+
📋 Steps
+
+
Loading steps...
+
+
+
+ ` + } + + function updateMultipartyRunCardState(runId, isComplete) { + const runCard = document.querySelector(`.flow-run-card[data-run-id="${runId}"]`) + if (!runCard) return + runCard.classList.toggle('mp-run-complete', !!isComplete) + + const statusBadge = runCard.querySelector('.status-badge') + if (statusBadge && runCard.dataset.isMultiparty === 'true') { + if (isComplete) { + statusBadge.textContent = '✓ Complete' + statusBadge.classList.add('mp-run-status-complete') + } else { + statusBadge.classList.remove('mp-run-status-complete') + if (statusBadge.textContent?.includes('Complete')) { + statusBadge.textContent = '⋯ Running' + } + } + } + } + + // Load and render multiparty steps for a run + async function loadMultipartySteps(sessionId, runId) { + const stepsContainer = document.querySelector(`.mp-steps-list[data-run-id="${runId}"]`) + if (!stepsContainer) return + + // Preserve active tab state before refresh + const activeTab = + multipartyActiveTabs.get(sessionId) || + stepsContainer.querySelector('.mp-tab.active')?.dataset?.tab || + 'steps' + const previousScrollTop = stepsContainer.scrollTop + + try { + // Keep shared _progress state fresh across peers without spamming sync requests. + const now = Date.now() + const lastSync = multipartyLastSyncAt.get(sessionId) || 0 + if (now - lastSync > 2000) { + multipartyLastSyncAt.set(sessionId, now) + await invoke('trigger_syftbox_sync').catch(() => null) + } + + // Fetch flow state, participant progress, and shared activity logs. + const [state, allProgress, activityLogs] = await Promise.all([ + invoke('get_multiparty_flow_state', { sessionId }), + invoke('get_all_participant_progress', { sessionId }).catch(() => []), + invoke('get_participant_logs', { sessionId }).catch(() => []), + ]) + + if (!state || !state.steps) { + multipartyRenderedHtml.delete(sessionId) + multipartyRenderKeys.delete(sessionId) + stepsContainer.innerHTML = '
No steps found
' + return + } + + const diagnosticStepIds = [ + ...new Set( + (state.steps || []) + .filter((step) => { + const id = String(step?.id || '').toLowerCase() + const modulePath = String(step?.module_path || '').toLowerCase() + return id === 'secure_aggregate' || modulePath.includes('secure-aggregate') + }) + .map((step) => step.id), + ), + ] + const diagnosticsByStepId = new Map() + if (diagnosticStepIds.length > 0) { + const diagnosticResults = await Promise.all( + diagnosticStepIds.map((stepId) => + invoke('get_multiparty_step_diagnostics', { sessionId, stepId }).catch(() => null), + ), + ) + for (const diagnostics of diagnosticResults) { + if (diagnostics && diagnostics.step_id) { + diagnosticsByStepId.set(diagnostics.step_id, diagnostics) + } + } + } + + const normalizeKey = (value) => String(value || '').toLowerCase() + const parseTimestampMs = (value) => { + if (!value) return null + const ts = Date.parse(String(value)) + return Number.isFinite(ts) ? ts : null + } + const normalizeProgressStatus = (rawStatus) => { + const raw = String(rawStatus || '') + const normalized = raw.trim().toLowerCase() + if ( + normalized === 'completed' || + normalized === 'complete' || + normalized === 'done' || + normalized === 'success' || + normalized === 'succeeded' + ) { + return 'Completed' + } + if (normalized === 'shared') return 'Shared' + if (normalized === 'sharing') return 'Sharing' + if ( + normalized === 'running' || + normalized === 'in_progress' || + normalized === 'in-progress' + ) + return 'Running' + if (normalized === 'ready') return 'Ready' + if ( + normalized === 'waitingforinputs' || + normalized === 'waiting_for_inputs' || + normalized === 'waiting-for-inputs' + ) { + return 'WaitingForInputs' + } + if (normalized === 'failed' || normalized === 'error') return 'Failed' + return raw || 'Pending' + } + const progressStatusRank = (statusValue) => { + const normalized = normalizeProgressStatus(statusValue) + if (normalized === 'Failed') return 100 + if 
(normalized === 'Shared') return 90 + if (normalized === 'Completed') return 80 + if (normalized === 'Sharing') return 70 + if (normalized === 'Running') return 60 + if (normalized === 'Ready') return 50 + if (normalized === 'WaitingForInputs') return 40 + return 10 + } + const statusMemory = getNestedState(multipartyParticipantStatusMemory, sessionId) + const memoryKey = (stepId, email) => `${stepId}::${normalizeKey(email)}` + const isDoneForStep = (step, status) => { + if (!status) return false + if (step.shares_output) return status === 'Shared' + return status === 'Completed' || status === 'Shared' + } + // Build a map of step -> participant email -> progress record + const stepParticipants = {} + for (const p of allProgress) { + for (const s of p.steps || []) { + if (!stepParticipants[s.step_id]) stepParticipants[s.step_id] = {} + stepParticipants[s.step_id][normalizeKey(p.email)] = { + email: p.email, + role: p.role, + status: normalizeProgressStatus(s.status), + output_dir: s.output_dir || null, + } + } + } + // Fallback: infer per-participant step completion from shared activity logs. + for (const log of activityLogs || []) { + if (!log?.step_id || !log?.participant) continue + if (log.event !== 'step_completed' && log.event !== 'step_shared') continue + if (!stepParticipants[log.step_id]) stepParticipants[log.step_id] = {} + const key = normalizeKey(log.participant) + const existing = stepParticipants[log.step_id][key] + const existingStatus = existing?.status + const inferredStatus = + log.event === 'step_shared' || existingStatus === 'Shared' ? 'Shared' : 'Completed' + stepParticipants[log.step_id][key] = { + email: existing?.email || log.participant, + role: existing?.role || null, + status: normalizeProgressStatus(inferredStatus), + output_dir: existing?.output_dir || null, + } + } + + // Seed/freeze step timers from local participant activity logs so completed + // cards retain elapsed time across rerenders and sync refreshes. 
+ const myEmailKey = normalizeKey(state.my_email) + for (const log of activityLogs || []) { + const stepId = String(log?.step_id || '').trim() + if (!stepId) continue + if (normalizeKey(log?.participant) !== myEmailKey) continue + const event = String(log?.event || '') + if (!['step_started', 'step_completed', 'step_shared'].includes(event)) continue + const tsMs = parseTimestampMs(log?.timestamp) + if (!tsMs) continue + + const timerKey = getStepTimerKey(sessionId, stepId) + const current = multipartyStepTimers.get(timerKey) || { + startedAt: null, + stoppedAt: null, + } + if (event === 'step_started') { + if (!current.startedAt || tsMs < current.startedAt) current.startedAt = tsMs + } else { + if (!current.startedAt) current.startedAt = tsMs + if (!current.stoppedAt || tsMs > current.stoppedAt) current.stoppedAt = tsMs + } + multipartyStepTimers.set(timerKey, current) + } + // Ensure current participant's latest in-memory state is reflected immediately. + const myEmail = state.my_email + if (myEmail) { + for (const step of state.steps || []) { + if (!stepParticipants[step.id]) stepParticipants[step.id] = {} + const existing = stepParticipants[step.id][normalizeKey(myEmail)] || {} + stepParticipants[step.id][normalizeKey(myEmail)] = { + email: myEmail, + role: state.my_role, + status: normalizeProgressStatus(step.status), + output_dir: step.output_dir || existing.output_dir || null, + } + } + } + + // Keep participant completion states monotonic within a session so transient + // sync lag does not regress chips (e.g. Shared -> Pending flicker). 
+ for (const [stepId, participantsByEmail] of Object.entries(stepParticipants)) { + for (const [emailKey, record] of Object.entries(participantsByEmail || {})) { + const participantEmail = record?.email || emailKey + const normalizedStatus = normalizeProgressStatus(record?.status) + const key = memoryKey(stepId, participantEmail) + const cached = statusMemory.get(key) + const mergedStatus = + cached && progressStatusRank(cached.status) > progressStatusRank(normalizedStatus) + ? cached.status + : normalizedStatus + participantsByEmail[emailKey] = { + ...record, + email: participantEmail, + status: mergedStatus, + output_dir: record?.output_dir || cached?.output_dir || null, + } + statusMemory.set(key, { + status: mergedStatus, + output_dir: participantsByEmail[emailKey].output_dir || null, + }) + } + } + + const getParticipantStepStatus = (stepId, email) => { + const key = normalizeKey(email) + const direct = stepParticipants[stepId]?.[key] + if (direct) return direct + const cached = statusMemory.get(memoryKey(stepId, email)) + if (!cached) return null + return { + email, + role: null, + status: cached.status, + output_dir: cached.output_dir || null, + } + } + + const isParticipantDoneForStep = (step, email) => { + const directStatus = getParticipantStepStatus(step.id, email)?.status + if (isDoneForStep(step, directStatus)) return true + if (!step.is_barrier) return false + + const deps = Array.isArray(step?.depends_on) ? step.depends_on : [] + if (deps.length === 0) return isDoneForStep(step, step.status) + const depsDone = deps.every((depId) => { + const depStep = stepById.get(depId) + if (!depStep) return true + const depStatus = getParticipantStepStatus(depId, email)?.status + return isDoneForStep(depStep, depStatus) + }) + if (depsDone) return true + + // Fallback for distributed lag: if a downstream dependent step already started for this participant, + // treat this barrier as satisfied for chip/status purposes. 
+ const downstreamStarted = (state.steps || []).some((candidate) => { + if (!Array.isArray(candidate?.depends_on) || !candidate.depends_on.includes(step.id)) { + return false + } + if (Array.isArray(candidate?.target_emails) && !candidate.target_emails.includes(email)) { + return false + } + const participantStatus = getParticipantStepStatus(candidate.id, email)?.status + if (participantStatus) { + return ( + participantStatus === 'Running' || + participantStatus === 'Completed' || + participantStatus === 'Shared' || + participantStatus === 'Failed' + ) + } + return false + }) + return downstreamStarted + } + + const getStepCompletion = (step) => { + const requiredEmails = (step.target_emails || []).filter(Boolean) + if (step.is_barrier) { + const doneByParticipants = + requiredEmails.length > 0 + ? requiredEmails.every((email) => isParticipantDoneForStep(step, email)) + : false + const done = doneByParticipants || isDoneForStep(step, step.status) + return { + requiredEmails, + allDone: done, + anyShared: step.status === 'Shared', + } + } + if (requiredEmails.length === 0) { + const done = isDoneForStep(step, step.status) + return { + requiredEmails, + allDone: done, + anyShared: step.status === 'Shared', + } + } + const participantRecords = requiredEmails + .map((email) => getParticipantStepStatus(step.id, email)) + .filter(Boolean) + const allParticipantsDone = requiredEmails.every((email) => { + const status = getParticipantStepStatus(step.id, email)?.status + return isDoneForStep(step, status) + }) + // For targeted collaborative steps, completion must come from participant records. + // Local non-owner step.status can be stale and should not override participant truth. 
+ const allDone = allParticipantsDone + return { + requiredEmails, + allDone, + anyShared: + step.status === 'Shared' || participantRecords.some((p) => p.status === 'Shared'), + } + } + + const getEffectiveStepStatus = (step, completion) => { + if (step.status === 'Failed' || step.status === 'Running' || step.status === 'Sharing') { + return step.status + } + if (completion.allDone) { + return completion.anyShared ? 'Shared' : 'Completed' + } + return step.status + } + const toTitleWords = (value) => + String(value || '') + .replace(/[_-]+/g, ' ') + .trim() + .split(/\s+/) + .filter(Boolean) + .map((part) => part.charAt(0).toUpperCase() + part.slice(1)) + .join(' ') + const resolveStepDisplayName = (step) => { + const id = String(step?.id || '').trim() + const name = String(step?.name || '').trim() + if (!id && !name) return 'Step' + if (name && name.toLowerCase() !== id.toLowerCase()) return name + + const code = String(step?.code_preview || '') + const shareKeyMatch = code.match(/share:\s*\n\s+([A-Za-z0-9_-]+)\s*:/m) + const shareKey = shareKeyMatch?.[1] || '' + const noun = shareKey ? toTitleWords(shareKey.replace(/_shared$/i, '')) : '' + const verb = toTitleWords(id) + if (verb && noun && !verb.toLowerCase().includes(noun.toLowerCase())) { + return `${verb} ${noun}` + } + return verb || name || id + } + const stepById = new Map((state.steps || []).map((s) => [s.id, s])) + const areDependenciesSatisfied = (step) => { + const deps = Array.isArray(step?.depends_on) ? 
step.depends_on : [] + if (deps.length === 0) return true + return deps.every((depId) => { + const depStep = stepById.get(depId) + if (!depStep) return true + return getStepCompletion(depStep).allDone + }) + } + const isMyStepActionable = (step) => { + if (!step?.my_action) return false + const readyToShare = + step.shares_output && step.status === 'Completed' && !step.outputs_shared + const readyToRun = + step.status === 'Ready' || (step.status === 'Pending' && areDependenciesSatisfied(step)) + if (!readyToRun && !readyToShare) return false + return areDependenciesSatisfied(step) + } + + // Find next actionable step for "Run Next" button + const myNextStep = state.steps.find((s) => isMyStepActionable(s)) + const totalSteps = state.steps.length + let completedSteps = state.steps.filter((step) => getStepCompletion(step).allDone).length + let allComplete = completedSteps === totalSteps && totalSteps > 0 + + if (!allComplete && state.thread_id) { + const myAssignedDone = (state.steps || []) + .filter((s) => s.my_action) + .every((s) => { + if (s.shares_output) return s.status === 'Shared' || s.outputs_shared + return s.status === 'Completed' || s.status === 'Shared' + }) + const finalShareStepId = [...(state.steps || [])].reverse().find((s) => s.shares_output)?.id + if (myAssignedDone && finalShareStepId) { + try { + const threadMessages = await invoke('get_thread_messages', { + threadId: state.thread_id, + }) + const sawFinalShare = (threadMessages || []).some((msg) => { + const metadataRaw = msg?.metadata + if (!metadataRaw) return false + let metadata = metadataRaw + if (typeof metadataRaw === 'string') { + try { + metadata = JSON.parse(metadataRaw) + } catch (_e) { + return false + } + } + return metadata?.flow_results?.step_id === finalShareStepId + }) + if (sawFinalShare) { + completedSteps = totalSteps + allComplete = totalSteps > 0 + } + } catch (_e) { + // Ignore message-query errors; primary completion logic still applies. 
+ } + } + } + + const progressPercent = totalSteps > 0 ? Math.round((completedSteps / totalSteps) * 100) : 0 + updateMultipartyRunCardState(runId, allComplete) + + const waitingOnMeStep = (state.steps || []).find((s) => isMyStepActionable(s)) + const dependencyBlockersForStep = (step) => { + const deps = Array.isArray(step?.depends_on) ? step.depends_on : [] + if (!deps.length) return [] + const blockers = [] + for (const depId of deps) { + const depStep = stepById.get(depId) + if (!depStep) continue + const depCompletion = getStepCompletion(depStep) + for (const email of depCompletion.requiredEmails || []) { + const status = getParticipantStepStatus(depStep.id, email)?.status + if (!isDoneForStep(depStep, status)) blockers.push(email) + } + } + return [...new Set(blockers)] + } + const waitingOnOthers = waitingOnMeStep + ? dependencyBlockersForStep(waitingOnMeStep).filter((e) => e !== state.my_email) + : (() => { + const waitingParticipants = [] + for (const step of state.steps || []) { + const completion = getStepCompletion(step) + if (completion.allDone) continue + for (const email of completion.requiredEmails || []) { + const status = getParticipantStepStatus(step.id, email)?.status + if (!isDoneForStep(step, status)) waitingParticipants.push(email) + } + } + return [...new Set(waitingParticipants)].filter((e) => e !== state.my_email) + })() + waitingOnOthers.sort((a, b) => String(a).localeCompare(String(b))) + const waitingOnMeLabel = + waitingOnMeStep && + waitingOnMeStep.shares_output && + waitingOnMeStep.status === 'Completed' && + !waitingOnMeStep.outputs_shared + ? `share ${resolveStepDisplayName(waitingOnMeStep)}` + : resolveStepDisplayName(waitingOnMeStep) || 'next step' + + const waitingBannerHtml = waitingOnMeStep + ? `
⚠ Waiting on YOU: ${escapeHtml(waitingOnMeLabel)}${waitingOnOthers.length ? ` | Others waiting: ${waitingOnOthers.map((e) => `${escapeHtml(e)}`).join(' ')}` : ''}
` + : waitingOnOthers.length + ? `
Waiting on: ${waitingOnOthers.map((e) => `${escapeHtml(e)}`).join(' ')}
` + : `
No blockers
` + + // Progress bar and Run Next button + const progressHtml = ` +
+
+
+
+
${allComplete ? '✓ Done' : `${completedSteps}/${totalSteps} steps complete`}
+ ${waitingBannerHtml} +
+ + +
+ ${ + myNextStep + ? `` + : '' + } +
+ ` + + // Render participant chips showing ALL participants for each step + // Shows checkbox status: ☑ = completed/shared, ☐ = not done yet, greyed = not involved + const renderParticipantChips = (step, stepId, stepTargetEmails) => { + const allParticipants = state.participants || [] + + return `
+ ${allParticipants + .map((p) => { + // Check if this participant's email is in the step's target emails + const isInvolved = stepTargetEmails.includes(p.email) + if (!isInvolved) { + // Greyed out - not involved in this step + return ` + ☐ ${escapeHtml(p.email)} + ` + } + + // Check if this participant has completed/shared this step + const completed = getParticipantStepStatus(stepId, p.email) + const isComplete = isParticipantDoneForStep(step, p.email) + const isShared = completed?.status === 'Shared' + const isReadyToShare = step.shares_output && completed?.status === 'Completed' + + const checkbox = isComplete ? '☑' : '☐' + const statusClass = isComplete + ? isShared + ? 'shared' + : 'completed' + : isReadyToShare + ? 'ready-share' + : 'pending' + const statusText = isShared + ? 'Shared' + : isComplete + ? 'Completed' + : isReadyToShare + ? 'Ready to share' + : 'Pending' + + return ` + ${checkbox} ${escapeHtml(p.email)} + ` + }) + .join('')} +
` + } + + const renderStepContributions = (step, completion) => { + const rows = completion.requiredEmails + .map((email) => { + const record = getParticipantStepStatus(step.id, email) + const sharedEnough = step.shares_output + ? record?.status === 'Shared' + : record && (record.status === 'Completed' || record.status === 'Shared') + if (!record || !sharedEnough || !record.output_dir) return '' + const escapedPath = String(record.output_dir) + .replace(/\\/g, '\\\\') + .replace(/'/g, "\\'") + return `
+ ${escapeHtml(record.email)} (${escapeHtml(record.status)}) + +
` + }) + .filter(Boolean) + .join('') + if (!rows) return '' + return `
${rows}
` + } + const renderInputWaiting = (step) => { + const waitingOn = Array.isArray(step?.input_waiting_on) + ? [...new Set(step.input_waiting_on.map((e) => String(e || '').trim()).filter(Boolean))] + : [] + if (!waitingOn.length) return '' + const chips = waitingOn + .map( + (email) => ``, + ) + .join('') + const reason = step?.input_waiting_reason ? String(step.input_waiting_reason) : '' + return `
+
⏳ Waiting for required shared inputs from:
+
${chips}
+ ${reason ? `
${escapeHtml(reason)}
` : ''} +
` + } + + const stepExpandedState = getNestedState(multipartyStepExpanded, sessionId) + const stepsHtml = state.steps + .map((step) => { + const completion = getStepCompletion(step) + const effectiveStatus = getEffectiveStepStatus(step, completion) + updateStepTimer(sessionId, step.id, effectiveStatus) + const stepTimerLabel = getStepTimerLabel(sessionId, step.id) + const statusClass = `mp-step-${effectiveStatus.toLowerCase()}` + let statusIcon = getStepStatusIcon(effectiveStatus) + const isMyAction = step.my_action + const isNextStep = myNextStep && step.id === myNextStep.id + if ( + isMyAction && + step.shares_output && + effectiveStatus === 'Completed' && + !step.outputs_shared + ) { + statusIcon = '📤' + } + const codeExpandedState = getNestedState(multipartyCodeExpanded, sessionId) + const logExpandedState = getNestedState(multipartyLogExpanded, sessionId) + const defaultExpanded = + !!isNextStep || effectiveStatus === 'Running' || effectiveStatus === 'Failed' + const isExpanded = stepExpandedState.has(step.id) + ? stepExpandedState.get(step.id) + : defaultExpanded + const defaultLogExpanded = effectiveStatus === 'Running' || effectiveStatus === 'Failed' + const isLogExpanded = logExpandedState.has(step.id) + ? logExpandedState.get(step.id) === true + : defaultLogExpanded + const isCompleted = effectiveStatus === 'Completed' || effectiveStatus === 'Shared' + const dependenciesSatisfied = areDependenciesSatisfied(step) + + // Only show auto toggle for pending/ready steps that are mine + const showAutoToggle = isMyAction && !isCompleted + + return ` +
+ +
+ ${renderParticipantChips(step, step.id, step.target_emails || [])} + ${renderStepConnectivity(step, sessionId, diagnosticsByStepId.get(step.id))} +
${escapeHtml(step.description)}
+ ${renderInputWaiting(step)} + ${ + step.code_preview + ? `
+ Show code +
${escapeHtml(step.code_preview)}
+
` + : '' + } +
+ + Show logs + + +
Loading logs...
+
+
+ ${ + showAutoToggle + ? `` + : '' + } + ${renderStepActions(step, sessionId, isMyAction, effectiveStatus, dependenciesSatisfied)} +
+ ${renderStepContributions(step, completion)} +
+
+ ` + }) + .join('') + + // Build tabbed interface with Steps and Activity Log + const tabsHtml = ` +
+
+ + +
+
+ ${stepsHtml} +
+
+
+
Loading logs...
+
+
+
+ ` + + const nextHtml = progressHtml + tabsHtml + const renderSnapshot = { + activeTab, + expandedOverrides: Array.from(stepExpandedState.entries()).sort((a, b) => + String(a[0]).localeCompare(String(b[0])), + ), + progressPercent, + completedSteps, + totalSteps, + allComplete, + waitingOnMeStepId: waitingOnMeStep?.id || '', + waitingOnOthers: waitingOnOthers.slice().sort(), + diagnostics: Array.from(diagnosticsByStepId.values()) + .map((diag) => ({ + stepId: diag.step_id, + channels: (diag.channels || []).map((channel) => ({ + id: channel.channel_id, + status: channel.status, + listener: channel.listener_up === true ? 1 : channel.listener_up === false ? -1 : 0, + })), + peers: (diag.peers || []) + .map((peer) => ({ + email: peer.email, + status: peer.status, + mode: peer.mode_short || peer.mode || '', + })) + .sort((a, b) => String(a.email).localeCompare(String(b.email))), + })) + .sort((a, b) => String(a.stepId).localeCompare(String(b.stepId))), + steps: (state.steps || []).map((step) => { + const completion = getStepCompletion(step) + const effectiveStatus = getEffectiveStepStatus(step, completion) + const participants = (step.target_emails || []) + .map((email) => ({ + email, + status: getParticipantStepStatus(step.id, email)?.status || '', + })) + .sort((a, b) => a.email.localeCompare(b.email)) + return { + id: step.id, + effectiveStatus, + autoRun: !!step.auto_run, + myAction: !!step.my_action, + outputsShared: !!step.outputs_shared, + participants, + } + }), + } + const renderKey = JSON.stringify(renderSnapshot) + const previousRenderKey = multipartyRenderKeys.get(sessionId) + const hasRenderedTabs = !!stepsContainer.querySelector('.mp-tabs-container') + const shouldRerender = !hasRenderedTabs || previousRenderKey !== renderKey + + if (shouldRerender) { + stopAllStepLogPolling(sessionId) + stepsContainer.innerHTML = nextHtml + stepsContainer.scrollTop = previousScrollTop + multipartyRenderedHtml.set(sessionId, nextHtml) + multipartyRenderKeys.set(sessionId, 
renderKey) + + stepsContainer.querySelectorAll('.mp-step-logs[open]').forEach((detailsEl) => { + const stepId = detailsEl.dataset.stepId + if (stepId) { + window.runsModule?.toggleStepLogs(detailsEl, sessionId, stepId, true) + } + }) + } + stepsContainer.querySelector('.mp-steps-transient-error')?.remove() + refreshStepTimerNodes(sessionId) + + // Load logs asynchronously + loadParticipantLogs(sessionId) + } catch (error) { + console.error('Failed to load multiparty steps:', error) + const hasRenderedSteps = !!stepsContainer.querySelector('.mp-tabs-container') + if (!hasRenderedSteps) { + multipartyRenderedHtml.delete(sessionId) + multipartyRenderKeys.delete(sessionId) + stepsContainer.innerHTML = `
Failed to load steps: ${escapeHtml(String(error))}
` + return + } + // Keep last good render and surface a non-blocking warning. + const existing = stepsContainer.querySelector('.mp-steps-transient-error') + const message = `Temporary refresh issue: ${escapeHtml(String(error))}` + if (existing) { + existing.innerHTML = message + } else { + const banner = document.createElement('div') + banner.className = 'mp-steps-transient-error' + banner.innerHTML = message + banner.style.cssText = + 'margin: 8px 0 12px; padding: 8px 10px; border: 1px solid #fecaca; background: #fef2f2; color: #991b1b; border-radius: 8px; font-size: 12px;' + stepsContainer.prepend(banner) + } + } + } + + // Load and display participant logs + async function loadParticipantLogs(sessionId) { + const logsContainer = document.querySelector(`.mp-logs-content[data-session="${sessionId}"]`) + if (!logsContainer) return + + try { + const logs = await invoke('get_participant_logs', { sessionId }) + if (!logs || logs.length === 0) { + logsContainer.innerHTML = '
No activity yet
' + return + } + const sortedLogs = [...logs].sort( + (a, b) => new Date(b.timestamp || 0).getTime() - new Date(a.timestamp || 0).getTime(), + ) + + const logsHtml = sortedLogs + .map((log) => { + const time = log.timestamp ? new Date(log.timestamp).toLocaleTimeString() : '' + const participant = log.participant || log.role + const rawStepId = log.step_id == null ? '' : String(log.step_id).trim() + const normalizedStepId = + rawStepId && rawStepId !== 'null' && rawStepId !== 'undefined' ? rawStepId : '' + const stepLabel = normalizedStepId ? `"${normalizedStepId}"` : 'a step' + const eventText = + log.event === 'joined' + ? `${participant} joined the flow` + : log.event === 'step_completed' + ? `${participant} completed ${stepLabel}` + : log.event === 'step_shared' + ? `${participant} shared outputs from ${stepLabel}` + : log.event === 'step_started' + ? `${participant} started ${stepLabel}` + : log.event === 'barrier_completed' + ? `${participant} completed barrier ${stepLabel}` + : `${participant}: ${log.event}` + return `
+ ${time} + ${escapeHtml(eventText)} +
` + }) + .join('') + + logsContainer.innerHTML = logsHtml + } catch (error) { + logsContainer.innerHTML = `
Failed to load logs
` + } + } + + function getStepStatusIcon(status) { + const icons = { + Pending: '⏳', + WaitingForInputs: '📥', + Ready: '✅', + Running: '🔄', + Completed: '✓', + Sharing: '📤', + Shared: '📨', + Failed: '❌', + } + return icons[status] || '❓' + } + + function renderStepActions( + step, + sessionId, + isMyAction, + effectiveStatus = step.status, + dependenciesSatisfied = true, + ) { + if (!isMyAction) { + if (effectiveStatus === 'Shared') { + return '📨 Shared' + } + if (effectiveStatus === 'Completed') { + if (step.shares_output) { + return '⏳ Ready to share' + } + return '✓ Done' + } + return 'Waiting for participant' + } + + const actions = [] + if (step.module_path) { + actions.push( + ``, + ) + } + if ((effectiveStatus === 'Ready' || effectiveStatus === 'Pending') && dependenciesSatisfied) { + actions.push( + ``, + ) + } + if ((effectiveStatus === 'Ready' || effectiveStatus === 'Pending') && !dependenciesSatisfied) { + actions.push('⏳ Waiting for dependencies') + } + if (effectiveStatus === 'Running') { + actions.push('Running...') + actions.push( + ``, + ) + } + if (effectiveStatus === 'Completed' && step.shares_output && !step.outputs_shared) { + actions.push( + ``, + ) + actions.push( + ``, + ) + } + if (step.shares_output && step.outputs_shared && effectiveStatus !== 'Shared') { + actions.push( + ``, + ) + actions.push('📨 Shared') + actions.push( + ``, + ) + } + if (effectiveStatus === 'Completed' && !step.shares_output) { + actions.push('✓ Done') + } + if (effectiveStatus === 'Shared') { + // Preview should remain available after sharing + actions.push( + ``, + ) + actions.push('📨 Shared') + actions.push( + ``, + ) + } + if (effectiveStatus === 'Failed') { + actions.push( + ``, + ) + } + if (step.status === 'WaitingForInputs') { + const waitingOn = Array.isArray(step?.input_waiting_on) ? 
step.input_waiting_on : [] + if (waitingOn.length > 0) { + actions.push( + `⏳ Waiting on: ${waitingOn.map((e) => escapeHtml(String(e))).join(', ')}`, + ) + } else { + actions.push('Waiting for inputs...') + } + } + if (!actions.length) { + if (effectiveStatus === 'Shared') return '📨 Shared' + if (effectiveStatus === 'Completed') { + if (step.shares_output) return '⏳ Ready to share' + return '✓ Done' + } + } + + return actions.join('') + } + + function escapeHtml(text) { + if (!text) return '' + const div = document.createElement('div') + div.textContent = text + return div.innerHTML + } + + function formatByteCount(bytes) { + const value = Number(bytes || 0) + if (!Number.isFinite(value) || value <= 0) return '0 B' + const units = ['B', 'KB', 'MB', 'GB', 'TB'] + let unitIndex = 0 + let scaled = value + while (scaled >= 1024 && unitIndex < units.length - 1) { + scaled /= 1024 + unitIndex += 1 + } + const digits = scaled >= 100 || unitIndex === 0 ? 0 : 1 + return `${scaled.toFixed(digits)} ${units[unitIndex]}` + } + + function formatRate(bytesPerSec) { + const value = Number(bytesPerSec || 0) + if (!Number.isFinite(value) || value <= 0) return '0 B/s' + return `${formatByteCount(value)}/s` + } + + function shortIdentity(value) { + const text = String(value || '') + if (!text) return 'unknown' + return text.split('@')[0] || text + } + + function getDiagnosticsSampleKey(sessionId, stepId) { + return `${sessionId}::${stepId}::diag` + } + + function getPeerRateMap(sessionId, stepId, peers) { + const key = getDiagnosticsSampleKey(sessionId, stepId) + const previous = multipartyDiagnosticsSamples.get(key) || {} + const next = {} + const rateMap = {} + const nowMs = Date.now() + + for (const peer of peers || []) { + const email = String(peer?.email || '') + if (!email) continue + const txBytes = Number(peer?.tx_bytes || 0) + const rxBytes = Number(peer?.rx_bytes || 0) + const updatedMsRaw = Number(peer?.updated_ms || 0) + const updatedMs = Number.isFinite(updatedMsRaw) && 
updatedMsRaw > 0 ? updatedMsRaw : nowMs + let txRate = 0 + let rxRate = 0 + const prev = previous[email] + if (prev) { + const deltaMs = Math.max(1, updatedMs - Number(prev.updatedMs || prev.seenMs || 0)) + txRate = (Math.max(0, txBytes - Number(prev.txBytes || 0)) * 1000) / deltaMs + rxRate = (Math.max(0, rxBytes - Number(prev.rxBytes || 0)) * 1000) / deltaMs + } + next[email] = { + txBytes, + rxBytes, + updatedMs, + seenMs: nowMs, + } + rateMap[email] = { txRate, rxRate } + } + + multipartyDiagnosticsSamples.set(key, next) + return rateMap + } + + function renderStepConnectivity(step, sessionId, diagnostics) { + if (!diagnostics) return '' + const channels = Array.isArray(diagnostics.channels) ? diagnostics.channels : [] + const peers = Array.isArray(diagnostics.peers) ? diagnostics.peers : [] + if (!channels.length && !peers.length) return '' + + const peerRates = getPeerRateMap(sessionId, step.id, peers) + const anyConnected = channels.some((channel) => channel?.status === 'connected') + const anyEstablishing = channels.some((channel) => channel?.status === 'establishing') + const liveLabel = anyConnected ? 'Connected' : anyEstablishing ? 'Establishing' : 'Waiting' + const liveClass = anyConnected ? 'connected' : anyEstablishing ? 'establishing' : 'waiting' + + const linksHtml = channels + .map((channel) => { + const status = String(channel?.status || 'waiting') + const fromLabel = shortIdentity(channel?.from_email || '') + const toLabel = shortIdentity(channel?.to_email || '') + const portLabel = channel?.port ? `port ${channel.port}` : 'port pending' + const listenerLabel = + channel?.listener_up === true + ? 'listener up' + : channel?.listener_up === false + ? 
'listener down' + : 'listener pending' + return `` + }) + .join('') + + const peersHtml = peers + .map((peer) => { + const email = String(peer?.email || '') + const status = String(peer?.status || 'pending') + const rates = peerRates[email] || { txRate: 0, rxRate: 0 } + const mode = String(peer?.mode_short || peer?.mode || 'unknown') + const txAvg = Number(peer?.tx_avg_send_ms || 0) + const rxAvg = Number(peer?.rx_avg_write_ms || 0) + const latencyParts = [] + if (txAvg > 0) latencyParts.push(`TX ${txAvg.toFixed(1)}ms`) + if (rxAvg > 0) latencyParts.push(`RX ${rxAvg.toFixed(1)}ms`) + const latencyLabel = latencyParts.length ? latencyParts.join(' · ') : 'Latency n/a' + const ageMs = Number(peer?.age_ms || 0) + const freshness = + status === 'connected' + ? 'live' + : status === 'stale' + ? `${Math.round(ageMs / 1000)}s old` + : 'pending' + return `
+
+ + ${escapeHtml(shortIdentity(email))} + ${escapeHtml(mode)} +
+
+ TX ${escapeHtml(formatRate(rates.txRate))} + RX ${escapeHtml(formatRate(rates.rxRate))} +
+
+ ${escapeHtml(latencyLabel)} + ${escapeHtml(freshness)} +
+
+ Total TX ${escapeHtml(formatByteCount(peer?.tx_bytes || 0))} + Total RX ${escapeHtml(formatByteCount(peer?.rx_bytes || 0))} +
+
` + }) + .join('') + + return `
+
+ Transport connectivity + ${escapeHtml(liveLabel)} +
+ ${linksHtml ? `` : ''} + ${peersHtml ? `
${peersHtml}
` : ''} +
` + } + + // Expose multiparty step controls to window + window.runsModule = window.runsModule || {} + window.runsModule.toggleStepAutoRun = async function (checkbox) { + const sessionId = checkbox.dataset.session + const stepId = checkbox.dataset.step + try { + await invoke('set_step_auto_run', { sessionId, stepId, autoRun: checkbox.checked }) + } catch (error) { + console.error('Failed to toggle auto-run:', error) + checkbox.checked = !checkbox.checked + } + } + window.runsModule.toggleStepExpanded = function (sessionId, stepId) { + const expandedState = getNestedState(multipartyStepExpanded, sessionId) + const current = expandedState.get(stepId) ?? false + expandedState.set(stepId, !current) + const runCard = document + .querySelector(`[data-session-id="${sessionId}"]`) + ?.closest('.flow-run-card') + if (runCard) { + const runId = runCard.dataset.runId + loadMultipartySteps(sessionId, runId).catch(() => {}) + } + } + window.runsModule.setAllStepsExpanded = function (sessionId, expand) { + const runCard = document + .querySelector(`[data-session-id="${sessionId}"]`) + ?.closest('.flow-run-card') + if (!runCard) return + const runId = runCard.dataset.runId + const steps = runCard.querySelectorAll('.mp-step[data-step-id]') + const expandedState = getNestedState(multipartyStepExpanded, sessionId) + steps.forEach((el) => { + const stepId = el.getAttribute('data-step-id') + if (stepId) expandedState.set(stepId, !!expand) + }) + loadMultipartySteps(sessionId, runId).catch(() => {}) + } + window.runsModule.rememberCodeToggle = function (sessionId, stepId, isOpen) { + const codeState = getNestedState(multipartyCodeExpanded, sessionId) + codeState.set(stepId, !!isOpen) + } + window.runsModule.toggleStepLogs = function (detailsEl, sessionId, stepId, isOpen) { + const logState = getNestedState(multipartyLogExpanded, sessionId) + logState.set(stepId, !!isOpen) + stopStepLogPolling(sessionId, stepId) + if (!isOpen) return + + const preEl = 
detailsEl?.querySelector('.mp-step-log-block') + if (!preEl) return + const cacheKey = getStepLogKey(sessionId, stepId) + + const refreshLogs = async () => { + try { + const text = await invoke('get_multiparty_step_logs', { + sessionId, + stepId, + lines: 240, + }) + const rendered = String(text || '').trim() || 'No step-specific logs yet.' + const previous = multipartyStepLogCache.get(cacheKey) + if (previous === rendered) return + const wasAtBottom = preEl.scrollHeight - preEl.scrollTop - preEl.clientHeight < 24 + const previousScrollTop = preEl.scrollTop + preEl.textContent = rendered + multipartyStepLogCache.set(cacheKey, rendered) + if (wasAtBottom) { + preEl.scrollTop = preEl.scrollHeight + } else { + const maxScrollTop = Math.max(0, preEl.scrollHeight - preEl.clientHeight) + preEl.scrollTop = Math.min(previousScrollTop, maxScrollTop) + } + } catch (error) { + const renderedError = `Failed to load logs: ${error}` + if (multipartyStepLogCache.get(cacheKey) !== renderedError) { + preEl.textContent = renderedError + multipartyStepLogCache.set(cacheKey, renderedError) + } + } + } + + refreshLogs() + const interval = setInterval(refreshLogs, 2000) + multipartyStepLogIntervals.set(getStepLogKey(sessionId, stepId), interval) + } + window.runsModule.copyStepLogs = async function (event, buttonEl, sessionId, stepId) { + try { + event?.preventDefault?.() + event?.stopPropagation?.() + const cacheKey = getStepLogKey(sessionId, stepId) + let text = String(multipartyStepLogCache.get(cacheKey) || '').trim() + if (!text) { + const fetched = await invoke('get_multiparty_step_logs', { + sessionId, + stepId, + lines: 240, + }) + text = String(fetched || '').trim() + if (text) multipartyStepLogCache.set(cacheKey, text) + } + if (!text) text = 'No step-specific logs yet.' 
+ await navigator.clipboard.writeText(text) + if (buttonEl) { + const original = buttonEl.textContent + buttonEl.textContent = 'Copied' + setTimeout(() => { + buttonEl.textContent = original || 'Copy logs' + }, 1200) + } + } catch (error) { + console.error('Failed to copy step logs:', error) + if (buttonEl) { + const original = buttonEl.textContent + buttonEl.textContent = 'Copy failed' + setTimeout(() => { + buttonEl.textContent = original || 'Copy logs' + }, 1600) + } + } + } + window.runsModule.runStep = async function (sessionId, stepId) { + const executeStep = async () => { + await invoke('run_flow_step', { sessionId, stepId }) + // Refresh the steps display + const runCard = document + .querySelector(`[data-session-id="${sessionId}"]`) + ?.closest('.flow-run-card') + if (runCard) { + const runId = runCard.dataset.runId + await loadMultipartySteps(sessionId, runId) + } + } + + try { + let dockerRunning = true + try { + dockerRunning = await invoke('check_docker_running') + } catch (checkError) { + console.warn('Docker check failed before multiparty step run:', checkError) + dockerRunning = false + } + + if (dockerRunning) { + await executeStep() + } else { + await showDockerWarningModal(executeStep) + } + } catch (error) { + console.error('Failed to run step:', error) + alert(`Failed to run step: ${error}`) + } + } + window.runsModule.openParticipantDatasite = async function (sessionId, participantEmail) { + try { + const datasitePath = await invoke('get_multiparty_participant_datasite_path', { + sessionId, + participantEmail, + }) + if (!datasitePath) { + throw new Error(`Datasite path unavailable for ${participantEmail}`) + } + await invoke('open_folder', { path: datasitePath }) + } catch (error) { + console.error('Failed to open participant datasite:', error) + alert(`Failed to open datasite for ${participantEmail}: ${error}`) + } + } + window.runsModule.cancelRunningStep = async function (sessionId, stepId) { + try { + const confirmed = await 
confirmWithDialog( + `Cancel currently running step "${stepId}"? This pauses the active flow run so it can be retried.`, + { title: 'Cancel Running Step', type: 'warning' }, + ) + if (!confirmed) return + + const state = await invoke('get_multiparty_flow_state', { sessionId }) + const runId = Number(state?.run_id || 0) + if (!runId) { + throw new Error('No run ID found for this multiparty session') + } + + await invoke('pause_flow_run', { runId }) + + const runCard = document + .querySelector(`[data-session-id="${sessionId}"]`) + ?.closest('.flow-run-card') + if (runCard) { + await loadMultipartySteps(sessionId, runId) + } + } catch (error) { + console.error('Failed to cancel running step:', error) + alert(`Failed to cancel running step: ${error}`) + } + } + window.runsModule.previewStepOutputs = async function (sessionId, stepId) { + try { + const files = await invoke('get_step_output_files', { sessionId, stepId }) + if (files && files.length > 0) { + // Get the folder path from the first file + const firstFile = files[0] + const folderPath = firstFile.substring(0, firstFile.lastIndexOf('/')) + // Open the folder in OS file manager + await invoke('open_folder', { path: folderPath }) + } else { + alert('No output files found') + } + } catch (error) { + console.error('Failed to preview outputs:', error) + alert(`Failed to open folder: ${error}`) + } + } + window.runsModule.shareStepOutputs = async function (sessionId, stepId) { + try { + await invoke('share_step_outputs', { sessionId, stepId }) + const runCard = document + .querySelector(`[data-session-id="${sessionId}"]`) + ?.closest('.flow-run-card') + if (runCard) { + const runId = runCard.dataset.runId + await loadMultipartySteps(sessionId, runId) + } + } catch (error) { + console.error('Failed to share outputs:', error) + alert(`Failed to share: ${error}`) + } + } + window.runsModule.shareStepOutputsToChat = async function (sessionId, stepId) { + try { + const result = await invoke('share_step_outputs_to_chat', 
{ sessionId, stepId }) + alert( + `Results shared to chat! ${result.files_shared} file(s) sent to ${result.recipients.length} participant(s).`, + ) + const runCard = document + .querySelector(`[data-session-id="${sessionId}"]`) + ?.closest('.flow-run-card') + if (runCard) { + const runId = runCard.dataset.runId + await loadMultipartySteps(sessionId, runId) + } + } catch (error) { + console.error('Failed to share outputs to chat:', error) + alert(`Failed to share to chat: ${error}`) + } + } + window.runsModule.viewStepModule = async function (modulePath) { + if (!modulePath) return + try { + await invoke('open_folder', { path: modulePath }) + } catch (error) { + console.error('Failed to open module path:', error) + alert(`Failed to open module path: ${error}`) + } + } + window.runsModule.openFolderPath = async function (folderPath) { + if (!folderPath) return + try { + await invoke('open_folder', { path: folderPath }) + } catch (error) { + console.error('Failed to open folder path:', error) + alert(`Failed to open folder: ${error}`) + } + } + window.runsModule.switchTab = function (tabButton, tabName) { + const container = tabButton.closest('.mp-tabs-container') + if (!container) return + const sessionId = container.dataset.session + if (sessionId) { + multipartyActiveTabs.set(sessionId, tabName) + } + + // Update tab buttons + container.querySelectorAll('.mp-tab').forEach((tab) => tab.classList.remove('active')) + tabButton.classList.add('active') + + // Update tab content + container + .querySelectorAll('.mp-tab-content') + .forEach((content) => content.classList.remove('active')) + const targetContent = container.querySelector(`[data-tab-content="${tabName}"]`) + if (targetContent) { + targetContent.classList.add('active') + } + + if (tabName === 'logs' && sessionId) { + loadParticipantLogs(sessionId).catch(() => {}) + } + } + async function loadRuns() { try { await invoke('reconcile_flow_runs').catch((error) => { @@ -603,8 +2319,28 @@ export function 
createRunsModule({ invoke, listen, dialog, refreshLogs = () => { const flows = await invoke('get_flows') // Get flow names const container = document.getElementById('runs-list') + const nextRunsRenderKey = JSON.stringify( + (flowRuns || []).map((run) => ({ + id: run.id, + status: run.status, + flow_id: run.flow_id, + created_at: run.created_at, + results_dir: run.results_dir || null, + work_dir: run.work_dir || null, + metadata: run.metadata || null, + })), + ) + if ( + !autoExpandRunId && + container?.children?.length > 0 && + runsRenderKey === nextRunsRenderKey + ) { + return + } + runsRenderKey = nextRunsRenderKey if (flowRuns.length === 0) { + runsRenderKey = '' container.innerHTML = `
@@ -636,7 +2372,20 @@ export function createRunsModule({ invoke, listen, dialog, refreshLogs = () => { ? runMetadata.nextflow_max_forks : null const dataSelection = runMetadata.data_selection || {} - const titleParts = [flowName] + const titleParts = [] + + // Add multiparty indicator if applicable + if (runMetadata.type === 'multiparty') { + titleParts.push('👥') + } + + titleParts.push(flowName) + + // Add role for multiparty runs + if (runMetadata.type === 'multiparty' && runMetadata.my_role) { + titleParts.push(`(${runMetadata.my_role})`) + } + if (dataSelection.dataset_name) titleParts.push(dataSelection.dataset_name) if (Array.isArray(dataSelection.asset_keys) && dataSelection.asset_keys.length > 0) { titleParts.push(dataSelection.asset_keys.join(', ')) @@ -645,7 +2394,7 @@ export function createRunsModule({ invoke, listen, dialog, refreshLogs = () => { const typeLabel = dataSelection.data_type === 'private' ? 'real' : dataSelection.data_type titleParts.push(typeLabel) } - const runTitle = titleParts.join(' - ') + const runTitle = titleParts.join(' ') const card = document.createElement('div') card.className = 'flow-run-card' card.dataset.runId = run.id @@ -858,10 +2607,17 @@ export function createRunsModule({ invoke, listen, dialog, refreshLogs = () => {
-
Loading steps...
+ ${ + runMetadata.type === 'multiparty' + ? renderMultipartyDetails(runMetadata, run.id) + : '
Loading steps...
' + }
` + const isMultiparty = runMetadata.type === 'multiparty' + card.dataset.isMultiparty = isMultiparty ? 'true' : 'false' + // Handle expand/collapse const header = card.querySelector('.run-header') const chevron = card.querySelector('.run-chevron') @@ -888,20 +2644,17 @@ export function createRunsModule({ invoke, listen, dialog, refreshLogs = () => { // Auto-expand if this is newly created OR currently running if (shouldAutoExpand) { - const detailsContainer = card.querySelector('.run-details') updateExpandedState(true) - // Load steps immediately for auto-expanded run - loadFlowRunSteps(run, flow, detailsContainer).catch(console.error) - // Scroll to this card after a brief delay (only for newly created, not all running) - if (isNewlyCreated) { - setTimeout(() => { - card.scrollIntoView({ behavior: 'smooth', block: 'nearest' }) - }, 100) - } } else { updateExpandedState(false) } + // Store data for post-append loading + card.dataset.shouldLoadSteps = shouldAutoExpand && isMultiparty ? 'true' : 'false' + card.dataset.shouldLoadFlowSteps = shouldAutoExpand && !isMultiparty ? 'true' : 'false' + card.dataset.sessionId = isMultiparty ? runMetadata.session_id || '' : '' + card.dataset.isNewlyCreated = isNewlyCreated ? 
'true' : 'false' + header.addEventListener('click', async (e) => { // Don't expand if clicking action buttons if ( @@ -930,10 +2683,19 @@ export function createRunsModule({ invoke, listen, dialog, refreshLogs = () => { const detailsContainer = card.querySelector('.run-details') if (newExpandedState) { detailsContainer.style.display = 'block' - await loadFlowRunSteps(run, flow, detailsContainer) + if (isMultiparty) { + await loadMultipartySteps(runMetadata.session_id, run.id) + // Start polling for updates from other participants + startMultipartyPolling(runMetadata.session_id, run.id) + } else { + await loadFlowRunSteps(run, flow, detailsContainer) + } } else { detailsContainer.style.display = 'none' stopFlowLogPolling(run.id) + if (isMultiparty) { + stopMultipartyPolling(runMetadata.session_id) + } } }) @@ -1209,6 +2971,35 @@ export function createRunsModule({ invoke, listen, dialog, refreshLogs = () => { } container.appendChild(card) + + const mpInputsToggle = card.querySelector('.mp-inputs-toggle') + if (mpInputsToggle) { + mpInputsToggle.addEventListener('click', () => { + const runId = mpInputsToggle.dataset.runId + const body = card.querySelector(`.mp-inputs-body[data-run-id="${runId}"]`) + if (!body) return + const expanded = body.style.display !== 'none' + body.style.display = expanded ? 
'none' : 'block' + }) + } + + // Load steps AFTER card is in DOM + if (card.dataset.shouldLoadSteps === 'true') { + const sessionId = card.dataset.sessionId + loadMultipartySteps(sessionId, run.id).catch(console.error) + // Start polling for updates from other participants + startMultipartyPolling(sessionId, run.id) + } else if (card.dataset.shouldLoadFlowSteps === 'true') { + const detailsContainer = card.querySelector('.run-details') + loadFlowRunSteps(run, flow, detailsContainer).catch(console.error) + } + + // Scroll to newly created cards + if (card.dataset.isNewlyCreated === 'true') { + setTimeout(() => { + card.scrollIntoView({ behavior: 'smooth', block: 'nearest' }) + }, 100) + } }) } catch (error) { console.error('Error loading runs:', error) @@ -1879,13 +3670,6 @@ export function createRunsModule({ invoke, listen, dialog, refreshLogs = () => { } } - // Helper function for escaping HTML - function escapeHtml(text) { - const div = document.createElement('div') - div.textContent = text - return div.innerHTML - } - async function runAnalysis() { if (selectedParticipants.length === 0 || selectedModule === null) return diff --git a/src/templates/messages.html b/src/templates/messages.html index 79b6de60..6fc6489c 100644 --- a/src/templates/messages.html +++ b/src/templates/messages.html @@ -204,9 +204,9 @@

New Message

+ + + + + + diff --git a/test-scenario.sh b/test-scenario.sh index 31c90f48..2c9f7b3e 100755 --- a/test-scenario.sh +++ b/test-scenario.sh @@ -40,11 +40,27 @@ if [[ ! -d "$SYFTBOX_SDK_DIR" && -d "$BIOVAULT_DIR/syftbox-sdk" ]]; then SYFTBOX_SDK_DIR="$BIOVAULT_DIR/syftbox-sdk" fi DEVSTACK_SCRIPT="$BIOVAULT_DIR/tests/scripts/devstack.sh" -WS_PORT_BASE="${DEV_WS_BRIDGE_PORT_BASE:-3333}" +UI_PORT_EXPLICIT=0 +if [[ -n "${UI_PORT+x}" && -n "${UI_PORT}" ]]; then + UI_PORT_EXPLICIT=1 +fi +WS_PORT_BASE_EXPLICIT=0 +if [[ -n "${DEV_WS_BRIDGE_PORT_BASE+x}" && -n "${DEV_WS_BRIDGE_PORT_BASE}" ]]; then + WS_PORT_BASE_EXPLICIT=1 +fi +UNIFIED_LOG_PORT_EXPLICIT=0 +if [[ -n "${UNIFIED_LOG_PORT+x}" && -n "${UNIFIED_LOG_PORT}" ]]; then + UNIFIED_LOG_PORT_EXPLICIT=1 +fi +WS_PORT_BASE="${DEV_WS_BRIDGE_PORT_BASE:-}" LOG_FILE="${UNIFIED_LOG_FILE:-$ROOT_DIR/logs/unified-scenario.log}" -LOG_PORT="${UNIFIED_LOG_PORT:-9756}" -UI_PORT="${UI_PORT:-8082}" -MAX_PORT="${MAX_PORT:-8092}" +LOG_PORT="${UNIFIED_LOG_PORT:-}" +UI_PORT="${UI_PORT:-}" +UI_PORT_MIN="${UI_PORT_MIN:-8082}" +UI_PORT_MAX="${UI_PORT_MAX:-${MAX_PORT:-8999}}" +MAX_PORT="$UI_PORT_MAX" +WS_PORT_MIN="${DEV_WS_BRIDGE_PORT_MIN:-3333}" +LOG_PORT_MIN="${UNIFIED_LOG_PORT_MIN:-9756}" TRACE=${TRACE:-0} DEVSTACK_RESET="${DEVSTACK_RESET:-1}" TIMING="${TIMING:-1}" @@ -60,6 +76,15 @@ WARM_CACHE_SET=0 INTERACTIVE_MODE=0 # Headed browsers for visibility WAIT_MODE=0 # Keep everything running after test completes CLEANUP_ACTIVE=0 +# Treat explicit env NO_CLEANUP as user intent so scenario auto-preserve does not override it. 
+NO_CLEANUP_SET=0 +if [[ -n "${NO_CLEANUP+x}" ]]; then + NO_CLEANUP_SET=1 +fi +NO_CLEANUP="${NO_CLEANUP:-0}" # Leave processes/sandbox running on exit for debugging +SYQURE_MULTIPARTY_SECURE_ONLY="${SYQURE_MULTIPARTY_SECURE_ONLY:-0}" +SYQURE_MULTIPARTY_CLI_PARITY="${SYQURE_MULTIPARTY_CLI_PARITY:-0}" +SYQURE_DUMP_TRAFFIC="${SYQURE_DUMP_TRAFFIC:-0}" show_usage() { cat </dev/null 2>&1; then + if lsof -iTCP:"$port" -sTCP:LISTEN -n -P >/dev/null 2>&1; then + return 1 + fi + return 0 + fi python3 - "$port" >/dev/null 2>&1 <<'PY' import socket, sys port = int(sys.argv[1]) @@ -399,12 +549,52 @@ pick_free_port() { return 1 } +random_port_in_range() { + local min="$1" + local max="$2" + if [[ "$min" -le 0 || "$max" -le 0 || "$max" -lt "$min" ]]; then + return 1 + fi + local span=$((max - min + 1)) + echo $((min + RANDOM % span)) +} + +pick_free_port_randomized() { + local min="$1" + local max="$2" + local start + start="$(random_port_in_range "$min" "$max" || true)" + if [[ -z "$start" ]]; then + return 1 + fi + local picked + picked="$(pick_free_port "$start" "$max" || true)" + if [[ -n "$picked" ]]; then + echo "$picked" + return 0 + fi + if [[ "$start" -gt "$min" ]]; then + picked="$(pick_free_port "$min" "$((start - 1))" || true)" + if [[ -n "$picked" ]]; then + echo "$picked" + return 0 + fi + fi + return 1 +} + pick_ws_port_base() { local start="$1" local max="${2:-3499}" local count="${3:-2}" + if [[ "$count" -le 0 ]]; then + return 1 + fi local port="$start" - while [[ "$port" -lt "$max" ]]; do + while [[ "$port" -le "$max" ]]; do + if [[ "$((port + count - 1))" -gt "$max" ]]; then + break + fi local ok=1 local i=0 while [[ "$i" -lt "$count" ]]; do @@ -423,6 +613,124 @@ pick_ws_port_base() { return 1 } +pick_ws_port_base_randomized() { + local min="$1" + local max="$2" + local count="${3:-2}" + if [[ "$count" -le 0 || "$max" -lt "$min" ]]; then + return 1 + fi + local max_start=$((max - count + 1)) + if [[ "$max_start" -lt "$min" ]]; then + return 1 + fi + local 
start + start="$(random_port_in_range "$min" "$max_start" || true)" + if [[ -z "$start" ]]; then + return 1 + fi + local picked + picked="$(pick_ws_port_base "$start" "$max" "$count" || true)" + if [[ -n "$picked" ]]; then + echo "$picked" + return 0 + fi + if [[ "$start" -gt "$min" ]]; then + picked="$(pick_ws_port_base "$min" "$((start - 1 + count - 1))" "$count" || true)" + if [[ -n "$picked" ]]; then + echo "$picked" + return 0 + fi + fi + return 1 +} + +syqure_max_base_port() { + local party_count="${1:-2}" + local parties="$party_count" + if [[ "$parties" -lt 2 ]]; then + parties=2 + fi + local max_party_base_delta=$(((parties - 1) * 1000)) + local max_pair_offset=$((parties * (parties - 1) / 2)) + local reserve=$((max_party_base_delta + max_pair_offset + 10000 + parties)) + echo $((65535 - reserve)) +} + +syqure_mpc_comm_port_with_base() { + local base="$1" + local local_pid="$2" + local remote_pid="$3" + local parties="$4" + local min_pid="$local_pid" + local max_pid="$remote_pid" + if [[ "$remote_pid" -lt "$local_pid" ]]; then + min_pid="$remote_pid" + max_pid="$local_pid" + fi + local offset_major=$((min_pid * parties - min_pid * (min_pid + 1) / 2)) + local offset_minor=$((max_pid - min_pid)) + echo $((base + offset_major + offset_minor)) +} + +syqure_port_base_is_available() { + local global_base="$1" + local party_count="${2:-2}" + local parties="$party_count" + if [[ "$parties" -lt 2 ]]; then + parties=2 + fi + local party_id + local remote_id + for ((party_id = 0; party_id < parties; party_id++)); do + local party_base=$((global_base + party_id * 1000)) + local sharing_port=$((party_base + 10000)) + if ! is_port_free "$sharing_port"; then + return 1 + fi + for ((remote_id = 0; remote_id < parties; remote_id++)); do + if [[ "$remote_id" -eq "$party_id" ]]; then + continue + fi + local comm_port + comm_port="$(syqure_mpc_comm_port_with_base "$party_base" "$party_id" "$remote_id" "$parties")" + if ! 
is_port_free "$comm_port"; then + return 1 + fi + done + done + return 0 +} + +pick_syqure_port_base_randomized() { + local party_count="${1:-2}" + local min="${2:-20000}" + local max="${3:-$(syqure_max_base_port "$party_count")}" + if [[ "$max" -lt "$min" ]]; then + return 1 + fi + local start + start="$(random_port_in_range "$min" "$max" || true)" + if [[ -z "$start" ]]; then + return 1 + fi + local span=$((max - min + 1)) + local candidate="$start" + local i=0 + while [[ "$i" -lt "$span" ]]; do + if syqure_port_base_is_available "$candidate" "$party_count"; then + echo "$candidate" + return 0 + fi + candidate=$((candidate + 1)) + if [[ "$candidate" -gt "$max" ]]; then + candidate="$min" + fi + i=$((i + 1)) + done + return 1 +} + detect_platform() { local os arch case "$(uname -s)" in @@ -519,15 +827,38 @@ ensure_playwright_browsers() { # Kill any dangling Jupyter processes from previous runs kill_workspace_jupyter +# Kill any stale bv-desktop/syftboxd/syqure processes from previous runs in this workspace. +# These cause port conflicts, file lock contention, and non-deterministic test failures. +stale_pids="$(pgrep -f "$ROOT_DIR.*(bv-desktop|bv syftboxd|syqure|syftbox-rs)" 2>/dev/null || true)" +if [[ -n "$stale_pids" ]]; then + info "Killing stale workspace processes from previous runs: $stale_pids" + echo "$stale_pids" | xargs kill 2>/dev/null || true + sleep 1 + # Force-kill survivors + stale_pids="$(pgrep -f "$ROOT_DIR.*(bv-desktop|bv syftboxd|syqure|syftbox-rs)" 2>/dev/null || true)" + if [[ -n "$stale_pids" ]]; then + echo "$stale_pids" | xargs kill -9 2>/dev/null || true + fi +fi + # Find an available UI port -while ! is_port_free "$UI_PORT"; do - if [[ "${UI_PORT}" -ge "${MAX_PORT}" ]]; then - echo "No available port between ${UI_PORT:-8082} and ${MAX_PORT}" >&2 +if [[ "$UI_PORT_EXPLICIT" == "1" ]]; then + while ! 
is_port_free "$UI_PORT"; do + if [[ "${UI_PORT}" -ge "${MAX_PORT}" ]]; then + echo "No available port between ${UI_PORT} and ${MAX_PORT}" >&2 + exit 1 + fi + UI_PORT=$((UI_PORT + 1)) + info "UI port in use, trying ${UI_PORT}" + done +else + UI_PORT="$(pick_free_port_randomized "$UI_PORT_MIN" "$MAX_PORT" || true)" + if [[ -z "$UI_PORT" ]]; then + echo "No available UI port between ${UI_PORT_MIN} and ${MAX_PORT}" >&2 exit 1 fi - UI_PORT=$((UI_PORT + 1)) - info "UI port in use, trying ${UI_PORT}" -done + info "Selected UI port ${UI_PORT} (range ${UI_PORT_MIN}-${MAX_PORT})" +fi export UI_PORT export UI_BASE_URL="http://localhost:${UI_PORT}" @@ -535,16 +866,43 @@ export DISABLE_UPDATER=1 export DEV_WS_BRIDGE=1 WS_PORT_COUNT=2 -if [[ "$SCENARIO" == "syqure-flow" ]]; then +if [[ "$SCENARIO" == "syqure-flow" || "$SCENARIO" == "pipelines-multiparty" || "$SCENARIO" == "pipelines-multiparty-flow" || "$SCENARIO" == "syqure-multiparty-flow" || "$SCENARIO" == "syqure-multiparty-allele-freq" ]]; then WS_PORT_COUNT=3 fi -WS_PORT_BASE="$(pick_ws_port_base "$WS_PORT_BASE" "${DEV_WS_BRIDGE_PORT_MAX:-3499}" "$WS_PORT_COUNT" || true)" +WS_PORT_MAX="${DEV_WS_BRIDGE_PORT_MAX:-3499}" +if [[ "$WS_PORT_BASE_EXPLICIT" == "1" ]]; then + WS_PORT_BASE="$(pick_ws_port_base "$WS_PORT_BASE" "$WS_PORT_MAX" "$WS_PORT_COUNT" || true)" +else + WS_PORT_BASE="$(pick_ws_port_base_randomized "$WS_PORT_MIN" "$WS_PORT_MAX" "$WS_PORT_COUNT" || true)" +fi if [[ -z "$WS_PORT_BASE" ]]; then - echo "Could not find ${WS_PORT_COUNT} free consecutive WS ports starting at ${DEV_WS_BRIDGE_PORT_BASE:-3333}" >&2 + if [[ "$WS_PORT_BASE_EXPLICIT" == "1" ]]; then + echo "Could not find ${WS_PORT_COUNT} free consecutive WS ports starting at ${DEV_WS_BRIDGE_PORT_BASE}" >&2 + else + echo "Could not find ${WS_PORT_COUNT} free consecutive WS ports in range ${WS_PORT_MIN}-${WS_PORT_MAX}" >&2 + fi exit 1 fi export DEV_WS_BRIDGE_PORT_BASE="$WS_PORT_BASE" +# Pick a free Syqure TCP proxy base unless explicitly configured +if [[ 
"$SCENARIO" == "syqure-flow" || "$SCENARIO" == "syqure-multiparty-flow" || "$SCENARIO" == "syqure-multiparty-allele-freq" ]]; then + SYQURE_PARTY_COUNT=3 + if [[ "$SCENARIO" == "syqure-multiparty-flow" && "$SYQURE_MULTIPARTY_CLI_PARITY" == "1" && -z "${BV_SYQURE_PORT_BASE+x}" && -z "${SEQURE_COMMUNICATION_PORT+x}" ]]; then + info "Syqure CLI-parity mode: letting run_dynamic allocate BV_SYQURE_PORT_BASE from run_id." + elif [[ -z "${BV_SYQURE_PORT_BASE+x}" && -z "${SEQURE_COMMUNICATION_PORT+x}" ]]; then + SYQURE_PORT_BASE_MIN="${BV_SYQURE_PORT_BASE_MIN:-20000}" + SYQURE_PORT_BASE_MAX="${BV_SYQURE_PORT_BASE_MAX:-$(syqure_max_base_port "$SYQURE_PARTY_COUNT")}" + BV_SYQURE_PORT_BASE="$(pick_syqure_port_base_randomized "$SYQURE_PARTY_COUNT" "$SYQURE_PORT_BASE_MIN" "$SYQURE_PORT_BASE_MAX" || true)" + if [[ -z "$BV_SYQURE_PORT_BASE" ]]; then + echo "Could not find free Syqure port base in range ${SYQURE_PORT_BASE_MIN}-${SYQURE_PORT_BASE_MAX}" >&2 + exit 1 + fi + export BV_SYQURE_PORT_BASE + info "Selected Syqure TCP proxy base ${BV_SYQURE_PORT_BASE} (range ${SYQURE_PORT_BASE_MIN}-${SYQURE_PORT_BASE_MAX})" + fi +fi + CLIENT1_EMAIL="${CLIENT1_EMAIL:-client1@sandbox.local}" CLIENT2_EMAIL="${CLIENT2_EMAIL:-client2@sandbox.local}" AGG_EMAIL="${AGG_EMAIL:-aggregator@sandbox.local}" @@ -555,18 +913,19 @@ TAURI2_PID="" TAURI3_PID="" AGG_HOME="" AGG_CFG="" +SYQURE_WATCH_PID="" # Pick a free unified logger port unless explicitly configured -if [[ -n "${UNIFIED_LOG_PORT+x}" ]]; then +LOG_PORT_MAX="${UNIFIED_LOG_PORT_MAX:-9856}" +if [[ "$UNIFIED_LOG_PORT_EXPLICIT" == "1" ]]; then if ! 
is_port_free "$LOG_PORT"; then echo "UNIFIED_LOG_PORT=$LOG_PORT is already in use; choose a different port" >&2 exit 1 fi else - LOG_PORT_MAX="${UNIFIED_LOG_PORT_MAX:-9856}" - LOG_PORT="$(pick_free_port "$LOG_PORT" "$LOG_PORT_MAX" || true)" + LOG_PORT="$(pick_free_port_randomized "$LOG_PORT_MIN" "$LOG_PORT_MAX" || true)" if [[ -z "$LOG_PORT" ]]; then - echo "No available unified logger port between ${UNIFIED_LOG_PORT:-9756} and ${LOG_PORT_MAX}" >&2 + echo "No available unified logger port between ${LOG_PORT_MIN} and ${LOG_PORT_MAX}" >&2 exit 1 fi fi @@ -617,9 +976,91 @@ PY return 1 } -wait_for_listener "$LOG_PORT" "$LOGGER_PID" "unified logger" "${UNIFIED_LOG_WAIT_S:-5}" || { - echo "Unified logger failed to start on :${LOG_PORT}" >&2 - exit 1 +if ! wait_for_listener "$LOG_PORT" "$LOGGER_PID" "unified logger" "${UNIFIED_LOG_WAIT_S:-5}"; then + logger_required="${UNIFIED_LOG_REQUIRED:-0}" + logger_required_lc="$(printf '%s' "$logger_required" | tr '[:upper:]' '[:lower:]')" + if [[ "$logger_required_lc" == "1" || "$logger_required_lc" == "true" || "$logger_required_lc" == "yes" || "$logger_required_lc" == "on" ]]; then + echo "Unified logger failed to start on :${LOG_PORT}" >&2 + exit 1 + fi + echo "Warning: Unified logger failed to start on :${LOG_PORT}; continuing without unified WS logs" >&2 + kill "$LOGGER_PID" >/dev/null 2>&1 || true + LOGGER_PID="" + UNIFIED_LOG_WS_URL="" +fi + +is_enabled_flag() { + local value + value="$(printf '%s' "${1:-}" | tr '[:upper:]' '[:lower:]')" + [[ "$value" == "1" || "$value" == "true" || "$value" == "yes" || "$value" == "on" ]] +} + +stop_syqure_watchdog() { + if [[ -n "${SYQURE_WATCH_PID:-}" ]]; then + kill "$SYQURE_WATCH_PID" 2>/dev/null || true + wait "$SYQURE_WATCH_PID" 2>/dev/null || true + SYQURE_WATCH_PID="" + fi +} + +start_syqure_watchdog() { + local flow_name="$1" + local enabled="${SYQURE_MULTIPARTY_WATCH:-1}" + local interval="${SYQURE_MULTIPARTY_WATCH_INTERVAL:-2}" + local 
watcher="$ROOT_DIR/scripts/watch_syqure_multiparty.py" + if ! is_enabled_flag "$enabled"; then + return 0 + fi + if [[ ! -f "$watcher" ]]; then + info "Syqure watcher script not found: $watcher" + return 0 + fi + stop_syqure_watchdog + info "Starting Syqure watcher (flow=${flow_name}, interval=${interval}s)" + python3 "$watcher" \ + --sandbox "$SANDBOX_ROOT" \ + --flow "$flow_name" \ + --interval "$interval" \ + --prefix "[syq-watch]" & + SYQURE_WATCH_PID=$! +} + +kill_stale_syftboxd_locks() { + local sandbox_root="$1" + local matched=0 + + # Previous no-cleanup runs can leave embedded `bv syftboxd start --foreground` + # processes alive, which hold workspace locks and prevent devstack clients + # from starting their own daemons. + local pids + pids="$( + ps -axo pid=,command= 2>/dev/null | awk ' + /(^|[[:space:]])(bv|biovault)([[:space:]]|$)/ && + /(^|[[:space:]])syftboxd([[:space:]]|$)/ && + /(^|[[:space:]])start([[:space:]]|$)/ && + /(^|[[:space:]])--foreground([[:space:]]|$)/ { + print $1 + } + ' || true + )" + + while IFS= read -r pid; do + [[ -z "${pid:-}" ]] && continue + local cmd_with_env + cmd_with_env="$(ps eww -p "$pid" -o command= 2>/dev/null || true)" + if [[ -z "$cmd_with_env" ]]; then + continue + fi + if [[ "$cmd_with_env" == *"$sandbox_root"* ]]; then + info "Killing stale syftboxd lock holder pid=$pid (sandbox=$sandbox_root)" + kill "$pid" 2>/dev/null || true + matched=1 + fi + done <<< "$pids" + + if [[ "$matched" == "1" ]]; then + sleep 1 + fi } cleanup() { @@ -627,8 +1068,13 @@ cleanup() { return fi CLEANUP_ACTIVE=1 + stop_syqure_watchdog pause_for_interactive_exit + # Always kill processes — NO_CLEANUP only preserves sandbox files, never processes. + # Stale bv-desktop / syftboxd / syqure processes cause port conflicts and + # non-deterministic failures on subsequent runs. 
+ if [[ -n "${SERVER_PID:-}" ]]; then info "Stopping static server" kill "$SERVER_PID" 2>/dev/null || true @@ -670,7 +1116,7 @@ cleanup() { local c2="${CLIENT2_EMAIL:-client2@sandbox.local}" local agg="${AGG_EMAIL:-aggregator@sandbox.local}" local sandbox_root="${SANDBOX_ROOT:-$BIOVAULT_DIR/sandbox}" - if [[ "$SCENARIO" == "syqure-flow" ]]; then + if [[ "$SCENARIO" == "syqure-flow" || "$SCENARIO" == "pipelines-multiparty" || "$SCENARIO" == "pipelines-multiparty-flow" || "$SCENARIO" == "syqure-multiparty-flow" || "$SCENARIO" == "syqure-multiparty-allele-freq" ]]; then DEVSTACK_CLIENTS="${c1},${c2},${agg}" else DEVSTACK_CLIENTS="${c1},${c2}" @@ -683,6 +1129,26 @@ cleanup() { fi fi + # Final sweep: kill any process from this workspace that survived the graceful stops. + # This catches orphaned syftboxd daemons, syqure binaries, and child processes + # that the parent bv-desktop didn't clean up on exit. + local stale_count=0 + while IFS= read -r pid; do + if [[ -n "$pid" && "$pid" != "$$" ]]; then + kill "$pid" 2>/dev/null && ((stale_count++)) || true + fi + done < <(pgrep -f "$ROOT_DIR.*(bv-desktop|bv syftboxd|syqure|syftbox-rs)" 2>/dev/null || true) + if [[ "$stale_count" -gt 0 ]]; then + info "Killed $stale_count stale workspace processes" + sleep 1 + # Force-kill anything that didn't respond to SIGTERM + while IFS= read -r pid; do + if [[ -n "$pid" && "$pid" != "$$" ]]; then + kill -9 "$pid" 2>/dev/null || true + fi + done < <(pgrep -f "$ROOT_DIR.*(bv-desktop|bv syftboxd|syqure|syftbox-rs)" 2>/dev/null || true) + fi + # Close out any in-progress timers so failures still report partial durations. if timing_enabled; then while [[ "${#TIMER_LABEL_STACK[@]}" -gt 0 ]]; do @@ -711,8 +1177,11 @@ cleanup() { trap cleanup EXIT INT TERM if [[ "$SCENARIO" != "profiles-mock" && "$SCENARIO" != "files-cli" ]]; then + # Clear stale syftboxd lock holders from prior no-cleanup runs before touching devstack. 
+ kill_stale_syftboxd_locks "$SANDBOX_ROOT" + # Start devstack with two or three clients (reset by default to avoid stale state) - if [[ "$SCENARIO" == "syqure-flow" ]]; then + if [[ "$SCENARIO" == "syqure-flow" || "$SCENARIO" == "pipelines-multiparty" || "$SCENARIO" == "pipelines-multiparty-flow" || "$SCENARIO" == "syqure-multiparty-flow" || "$SCENARIO" == "syqure-multiparty-allele-freq" ]]; then info "Ensuring SyftBox devstack with three clients (reset=${DEVSTACK_RESET})" DEVSTACK_CLIENTS="${CLIENT1_EMAIL},${CLIENT2_EMAIL},${AGG_EMAIL}" else @@ -786,7 +1255,7 @@ if [[ "$SCENARIO" != "profiles-mock" && "$SCENARIO" != "files-cli" ]]; then CLIENT2_HOME="$(parse_field "$CLIENT2_EMAIL" home_path)" CLIENT1_CFG="$(parse_field "$CLIENT1_EMAIL" config)" CLIENT2_CFG="$(parse_field "$CLIENT2_EMAIL" config)" - if [[ "$SCENARIO" == "syqure-flow" ]]; then + if [[ "$SCENARIO" == "syqure-flow" || "$SCENARIO" == "pipelines-multiparty" || "$SCENARIO" == "pipelines-multiparty-flow" || "$SCENARIO" == "syqure-multiparty-flow" || "$SCENARIO" == "syqure-multiparty-allele-freq" ]]; then AGG_HOME="$(parse_field "$AGG_EMAIL" home_path)" AGG_CFG="$(parse_field "$AGG_EMAIL" config)" fi @@ -799,7 +1268,7 @@ PY info "Client1 home: $CLIENT1_HOME" info "Client2 home: $CLIENT2_HOME" - if [[ "$SCENARIO" == "syqure-flow" ]]; then + if [[ "$SCENARIO" == "syqure-flow" || "$SCENARIO" == "pipelines-multiparty" || "$SCENARIO" == "pipelines-multiparty-flow" || "$SCENARIO" == "syqure-multiparty-flow" || "$SCENARIO" == "syqure-multiparty-allele-freq" ]]; then info "Aggregator home: $AGG_HOME" fi info "Server URL: $SERVER_URL" @@ -904,7 +1373,7 @@ start_static_server() { local port_check_count=0 while ! 
is_port_free "$UI_PORT"; do if [[ "${UI_PORT}" -ge "${MAX_PORT}" ]]; then - echo "No available port between 8082 and ${MAX_PORT}" >&2 + echo "No available port between ${UI_PORT_MIN} and ${MAX_PORT}" >&2 exit 1 fi info "UI port ${UI_PORT} now in use, trying $((UI_PORT + 1))" @@ -1059,6 +1528,219 @@ assert_tauri_binary_fresh() { info "[DEBUG] Tauri binary is up to date (no newer source files found)" } +syqure_platform_id() { + local os_name arch_name os_label arch_label + os_name="$(uname -s | tr '[:upper:]' '[:lower:]')" + arch_name="$(uname -m | tr '[:upper:]' '[:lower:]')" + os_label="$os_name" + arch_label="$arch_name" + case "$os_name" in + darwin) os_label="macos" ;; + linux) os_label="linux" ;; + esac + case "$arch_name" in + arm64|aarch64) arch_label="arm64" ;; + x86_64|amd64|i386|i686) arch_label="x86" ;; + esac + echo "${os_label}-${arch_label}" +} + +syqure_repo_root_from_bin() { + local bin_path="$1" + if [[ -z "$bin_path" ]]; then + return 0 + fi + if [[ "$bin_path" == */target/release/syqure || "$bin_path" == */target/debug/syqure ]]; then + (cd "$(dirname "$bin_path")/../.." && pwd -P) + return 0 + fi + echo "" +} + +syqure_plugin_lib_name() { + case "$(uname -s | tr '[:upper:]' '[:lower:]')" in + darwin) echo "libsequre.dylib" ;; + *) echo "libsequre.so" ;; + esac +} + +configure_syqure_runtime_env() { + if [[ "$SCENARIO" != "syqure-flow" && "$SCENARIO" != "syqure-multiparty-flow" && "$SCENARIO" != "syqure-multiparty-allele-freq" ]]; then + return 0 + fi + if [[ -z "${SEQURE_NATIVE_BIN:-}" || ! -x "${SEQURE_NATIVE_BIN:-}" ]]; then + return 0 + fi + local syq_root platform codon_candidate codon_expected_suffix + syq_root="$(syqure_repo_root_from_bin "$SEQURE_NATIVE_BIN")" + if [[ -z "$syq_root" || ! 
-d "$syq_root" ]]; then + return 0 + fi + + platform="$(syqure_platform_id)" + codon_candidate="$syq_root/bin/${platform}/codon" + codon_expected_suffix="/bin/${platform}/codon" + + # Fail-fast guard: if CODON_PATH is set but clearly for another platform + # (e.g. macos-arm64 on Linux), override with current-platform bundle. + if [[ -n "${CODON_PATH:-}" ]]; then + if [[ "$CODON_PATH" != *"$codon_expected_suffix" ]]; then + if [[ -d "$codon_candidate/lib/codon" ]]; then + info "Overriding mismatched CODON_PATH=$CODON_PATH with platform path $codon_candidate" + export CODON_PATH="$codon_candidate" + else + echo "ERROR: CODON_PATH points to another platform: $CODON_PATH" >&2 + echo "Expected current platform bundle under: $codon_candidate" >&2 + echo "Fix CODON_PATH or build/install syqure codon bundle for $(syqure_platform_id)." >&2 + exit 1 + fi + fi + fi + + # Prefer local prebuilt Codon/Sequre tree in this workspace. + if [[ -z "${CODON_PATH:-}" ]]; then + if [[ -d "$codon_candidate/lib/codon" ]]; then + export CODON_PATH="$codon_candidate" + fi + fi + + # Hard fail before launching Tauri/Syqure if CODON_PATH is invalid. + if [[ -n "${CODON_PATH:-}" ]]; then + if [[ ! -d "${CODON_PATH}/lib/codon" ]]; then + echo "ERROR: CODON_PATH is invalid (missing lib/codon): $CODON_PATH" >&2 + exit 1 + fi + fi + + info "Syqure runtime env: SYQURE_SKIP_BUNDLE=${SYQURE_SKIP_BUNDLE:-unset} CODON_PATH=${CODON_PATH:-unset}" +} + +assert_syqure_binary_fresh() { + if [[ "$SCENARIO" != "syqure-flow" && "$SCENARIO" != "syqure-multiparty-flow" && "$SCENARIO" != "syqure-multiparty-allele-freq" ]]; then + return 0 + fi + if [[ -z "${SEQURE_NATIVE_BIN:-}" || ! -x "${SEQURE_NATIVE_BIN:-}" ]]; then + return 0 + fi + + local syq_root profile newer auto_rebuild + syq_root="$(syqure_repo_root_from_bin "$SEQURE_NATIVE_BIN")" + if [[ -z "$syq_root" || ! 
-d "$syq_root" ]]; then + return 0 + fi + profile="release" + if [[ "$SEQURE_NATIVE_BIN" == */target/debug/* ]]; then + profile="debug" + fi + info "[DEBUG] assert_syqure_binary_fresh: checking $SEQURE_NATIVE_BIN ($profile)" + + local candidates=( + "$syq_root/syqure/src" + "$syq_root/syqure/build.rs" + "$syq_root/syqure/Cargo.toml" + "$syq_root/Cargo.toml" + "$syq_root/Cargo.lock" + "$syq_root/sequre/stdlib" + "$syq_root/sequre/plugin.toml" + ) + newer="" + for p in "${candidates[@]}"; do + if [[ -f "$p" ]]; then + if [[ "$p" -nt "$SEQURE_NATIVE_BIN" ]]; then + newer="$p" + break + fi + elif [[ -d "$p" ]]; then + newer="$(find "$p" -type f -newer "$SEQURE_NATIVE_BIN" -print -quit 2>/dev/null || true)" + if [[ -n "$newer" ]]; then + break + fi + fi + done + if [[ -z "$newer" ]]; then + info "[DEBUG] Syqure binary is up to date" + return 0 + fi + + auto_rebuild="${AUTO_REBUILD_SYQURE:-1}" + info "[DEBUG] Syqure binary is older than sources (e.g. $newer); AUTO_REBUILD_SYQURE=$auto_rebuild" + if [[ "$auto_rebuild" == "0" || "$auto_rebuild" == "false" || "$auto_rebuild" == "no" ]]; then + echo "Syqure rebuild required: (cd $syq_root && cargo build -p syqure --release)" >&2 + exit 1 + fi + + # A plain `cargo build` can be a no-op for Sequre/Codon source changes because + # Cargo does not track those files directly. Force a clean rebuild whenever we + # already know the binary is stale to ensure the runtime bundle is refreshed. + info "[DEBUG] Forcing clean syqure rebuild because source mtime is newer than binary" + (cd "$syq_root" && cargo clean -p syqure) >&2 || true + + timer_push "Cargo build (syqure release)" + if ! 
(cd "$syq_root" && cargo build -p syqure --release) >&2; then + info "Syqure release build failed; rebuilding bundle then retrying" + (cd "$syq_root" && ZSTD_NBTHREADS=1 ./syqure_bins.sh) >&2 + (cd "$syq_root" && cargo build -p syqure --release) >&2 + fi + timer_pop +} + +assert_syqure_bundle_ready() { + if [[ "$SCENARIO" != "syqure-flow" && "$SCENARIO" != "syqure-multiparty-flow" && "$SCENARIO" != "syqure-multiparty-allele-freq" ]]; then + return 0 + fi + if [[ -z "${SEQURE_NATIVE_BIN:-}" || ! -x "${SEQURE_NATIVE_BIN:-}" ]]; then + return 0 + fi + + local syq_root platform codon_root plugin_name + syq_root="$(syqure_repo_root_from_bin "$SEQURE_NATIVE_BIN")" + if [[ -z "$syq_root" || ! -d "$syq_root" ]]; then + return 0 + fi + platform="$(syqure_platform_id)" + codon_root="$syq_root/bin/${platform}/codon" + plugin_name="$(syqure_plugin_lib_name)" + + local missing=0 + local required=( + "$codon_root/bin/codon" + "$codon_root/lib/codon/stdlib" + "$codon_root/lib/codon/plugins/sequre/build/$plugin_name" + ) + local p + for p in "${required[@]}"; do + if [[ ! -e "$p" ]]; then + info "[DEBUG] Missing syqure bundle asset: $p" + missing=1 + fi + done + + if [[ "$missing" -eq 0 ]]; then + info "[DEBUG] Syqure bundle is present for platform $platform" + return 0 + fi + + local auto_rebuild_bins="${AUTO_REBUILD_SYQURE_BINS:-1}" + if [[ "$auto_rebuild_bins" == "0" || "$auto_rebuild_bins" == "false" || "$auto_rebuild_bins" == "no" ]]; then + echo "Syqure platform bundle missing for $platform." >&2 + echo "Run: (cd $syq_root && ZSTD_NBTHREADS=1 ./syqure_bins.sh)" >&2 + exit 1 + fi + + info "Rebuilding syqure platform bundle via syqure_bins.sh (platform=$platform)" + timer_push "Syqure bundle build" + (cd "$syq_root" && ZSTD_NBTHREADS=1 ./syqure_bins.sh) >&2 + timer_pop + + for p in "${required[@]}"; do + if [[ ! 
-e "$p" ]]; then + echo "Syqure bundle build incomplete, missing: $p" >&2 + exit 1 + fi + done + info "[DEBUG] Syqure bundle verified after rebuild" +} + launch_instance() { local email="$1" local home="$2" @@ -1168,6 +1850,34 @@ wait_ws() { } start_tauri_instances() { + if [[ "$SCENARIO" == "syqure-flow" || "$SCENARIO" == "syqure-multiparty-flow" || "$SCENARIO" == "syqure-multiparty-allele-freq" ]]; then + if [[ -z "${SEQURE_NATIVE_BIN:-}" ]]; then + local syqure_candidates=( + "$WORKSPACE_ROOT/syqure/target/release/syqure" + "$WORKSPACE_ROOT/syqure/target/debug/syqure" + "$BIOVAULT_DIR/../syqure/target/release/syqure" + "$BIOVAULT_DIR/../syqure/target/debug/syqure" + ) + local candidate + for candidate in "${syqure_candidates[@]}"; do + if [[ -x "$candidate" ]]; then + export SEQURE_NATIVE_BIN="$candidate" + info "Using SEQURE_NATIVE_BIN=$SEQURE_NATIVE_BIN" + break + fi + done + fi + + if [[ -z "${SEQURE_NATIVE_BIN:-}" && -z "${BV_SYQURE_USE_DOCKER:-}" ]]; then + export BV_SYQURE_USE_DOCKER=1 + info "Syqure binary not found; defaulting BV_SYQURE_USE_DOCKER=1" + fi + + configure_syqure_runtime_env + assert_syqure_binary_fresh + assert_syqure_bundle_ready + fi + assert_tauri_binary_present assert_tauri_binary_fresh @@ -1193,7 +1903,7 @@ start_tauri_instances() { exit 1 } - if [[ "$SCENARIO" == "syqure-flow" ]]; then + if [[ "$SCENARIO" == "syqure-flow" || "$SCENARIO" == "pipelines-multiparty" || "$SCENARIO" == "pipelines-multiparty-flow" || "$SCENARIO" == "syqure-multiparty-flow" || "$SCENARIO" == "syqure-multiparty-allele-freq" ]]; then info "Launching Tauri for aggregator on WS port $((WS_PORT_BASE + 2))" TAURI3_PID=$(launch_instance "$AGG_EMAIL" "$AGG_HOME" "$AGG_CFG" "$((WS_PORT_BASE + 2))") info "Waiting for aggregator WS bridge..." 
@@ -1208,7 +1918,7 @@ start_tauri_instances() { info "Client1 UI: ${UI_BASE_URL}?ws=${WS_PORT_BASE}&real=1" info "Client2 UI: ${UI_BASE_URL}?ws=$((WS_PORT_BASE + 1))&real=1" - if [[ "$SCENARIO" == "syqure-flow" ]]; then + if [[ "$SCENARIO" == "syqure-flow" || "$SCENARIO" == "pipelines-multiparty" || "$SCENARIO" == "pipelines-multiparty-flow" || "$SCENARIO" == "syqure-multiparty-flow" || "$SCENARIO" == "syqure-multiparty-allele-freq" ]]; then info "Aggregator UI: ${UI_BASE_URL}?ws=$((WS_PORT_BASE + 2))&real=1" fi timer_pop @@ -1335,6 +2045,58 @@ run_ui_grep() { "${cmd[@]}" | tee -a "$LOG_FILE" } +prepare_allele_freq_ui_inputs() { + local gen_script="$ROOT_DIR/biovault/tests/scripts/gen_allele_freq_data.sh" + local file_count="${ALLELE_FREQ_COUNT:-1}" + local force_regen="${ALLELE_FREQ_FORCE_REGEN:-0}" + local c1_out="$CLIENT1_HOME/private/app_data/biovault/allele-freq-data" + local c2_out="$CLIENT2_HOME/private/app_data/biovault/allele-freq-data" + + if [[ ! -f "$gen_script" ]]; then + error "Missing allele-freq generator script: $gen_script" + exit 1 + fi + + timer_push "Prepare allele-freq inputs" + info "Preparing allele-freq synthetic inputs for UI run (count=${file_count})" + + local -a force_args=() + if [[ "$force_regen" == "1" || "$force_regen" == "true" || "$force_regen" == "yes" ]]; then + force_args=(--force) + fi + + local -a gen_cmd_client1=( + bash "$gen_script" + --output-dir "$c1_out" + --count "$file_count" + --seed "${ALLELE_FREQ_CLIENT1_SEED:-42}" + --apol1-het "${ALLELE_FREQ_CLIENT1_APOL1_HET:-0.6}" + --apol1-hom-alt "${ALLELE_FREQ_CLIENT1_APOL1_HOM_ALT:-0.2}" + --no-call-frequency "${ALLELE_FREQ_NO_CALL_FREQUENCY:-0.2}" + --no-call-token "${ALLELE_FREQ_NO_CALL_TOKEN:--}" + ) + append_array_items gen_cmd_client1 force_args + "${gen_cmd_client1[@]}" >>"$LOG_FILE" 2>&1 + + local -a gen_cmd_client2=( + bash "$gen_script" + --output-dir "$c2_out" + --count "$file_count" + --seed "${ALLELE_FREQ_CLIENT2_SEED:-43}" + --thal-het 
"${ALLELE_FREQ_CLIENT2_THAL_HET:-0.5}" + --thal-hom-alt "${ALLELE_FREQ_CLIENT2_THAL_HOM_ALT:-0.3}" + --no-call-frequency "${ALLELE_FREQ_NO_CALL_FREQUENCY:-0.2}" + --no-call-token "${ALLELE_FREQ_NO_CALL_TOKEN:--}" + ) + append_array_items gen_cmd_client2 force_args + "${gen_cmd_client2[@]}" >>"$LOG_FILE" 2>&1 + + info "Prepared allele-freq inputs:" + info " client1: ${c1_out}" + info " client2: ${c2_out}" + timer_pop +} + sanitize_playwright_args() { # If a user accidentally passes an empty --grep-invert pattern, Playwright will exclude everything # (empty regex matches all) and report "No tests found". Drop that footgun. @@ -1846,6 +2608,198 @@ PY while true; do sleep 1; done fi ;; + pipelines-multiparty) + start_static_server + start_tauri_instances + + info "=== Multiparty Messaging Test ===" + info "Three clients will exchange keys and hello messages." + info "" + info "Open these URLs in your browser:" + info " Client1: ${UI_BASE_URL}?ws=${WS_PORT_BASE}&real=1" + info " Client2: ${UI_BASE_URL}?ws=$((WS_PORT_BASE + 1))&real=1" + info " Client3: ${UI_BASE_URL}?ws=$((WS_PORT_BASE + 2))&real=1" + info "" + info "Emails: ${CLIENT1_EMAIL}, ${CLIENT2_EMAIL}, ${AGG_EMAIL}" + + # Run the multiparty messaging test + timer_push "Playwright: @pipelines-multiparty" + # Match only the exact tag and exclude @pipelines-multiparty-flow. + run_ui_grep "@pipelines-multiparty(?!-)" "INTERACTIVE_MODE=$INTERACTIVE_MODE" + timer_pop + + # In wait mode, keep everything running + if [[ "$WAIT_MODE" == "1" ]]; then + info "Wait mode: Servers will stay running. Press Ctrl+C to exit." + while true; do sleep 1; done + fi + ;; + pipelines-multiparty-flow) + start_static_server + start_tauri_instances + + info "=== Multiparty Flow Test ===" + info "Three clients will execute a multiparty flow with manual steps." 
+ info "" + info "Open these URLs in your browser:" + info " Client1: ${UI_BASE_URL}?ws=${WS_PORT_BASE}&real=1" + info " Client2: ${UI_BASE_URL}?ws=$((WS_PORT_BASE + 1))&real=1" + info " Aggregator: ${UI_BASE_URL}?ws=$((WS_PORT_BASE + 2))&real=1" + info "" + info "Emails: ${CLIENT1_EMAIL}, ${CLIENT2_EMAIL}, ${AGG_EMAIL}" + + # Run the multiparty flow test + timer_push "Playwright: @pipelines-multiparty-flow" + run_ui_grep "@pipelines-multiparty-flow" "INTERACTIVE_MODE=$INTERACTIVE_MODE" + timer_pop + + # In wait mode, keep everything running + if [[ "$WAIT_MODE" == "1" ]]; then + info "Wait mode: Servers will stay running. Press Ctrl+C to exit." + while true; do sleep 1; done + fi + ;; + syqure-multiparty-flow) + # Keep Syqure runtime env aligned with the distributed scenario defaults. + # Allow callers to override explicitly via environment. + export BV_SYFTBOX_HOTLINK="${BV_SYFTBOX_HOTLINK:-1}" + export BV_SYFTBOX_HOTLINK_SOCKET_ONLY="${BV_SYFTBOX_HOTLINK_SOCKET_ONLY:-1}" + export BV_SYFTBOX_HOTLINK_TCP_PROXY="${BV_SYFTBOX_HOTLINK_TCP_PROXY:-1}" + export BV_SYFTBOX_HOTLINK_QUIC="${BV_SYFTBOX_HOTLINK_QUIC:-1}" + export BV_SYFTBOX_HOTLINK_QUIC_ONLY="${BV_SYFTBOX_HOTLINK_QUIC_ONLY:-0}" + export SYFTBOX_HOTLINK="${SYFTBOX_HOTLINK:-$BV_SYFTBOX_HOTLINK}" + export SYFTBOX_HOTLINK_SOCKET_ONLY="${SYFTBOX_HOTLINK_SOCKET_ONLY:-$BV_SYFTBOX_HOTLINK_SOCKET_ONLY}" + export SYFTBOX_HOTLINK_TCP_PROXY="${SYFTBOX_HOTLINK_TCP_PROXY:-$BV_SYFTBOX_HOTLINK_TCP_PROXY}" + export SYFTBOX_HOTLINK_QUIC="${SYFTBOX_HOTLINK_QUIC:-$BV_SYFTBOX_HOTLINK_QUIC}" + export SYFTBOX_HOTLINK_QUIC_ONLY="${SYFTBOX_HOTLINK_QUIC_ONLY:-$BV_SYFTBOX_HOTLINK_QUIC_ONLY}" + if [[ -n "${SYQURE_SKIP_BUNDLE:-}" ]]; then + export SYQURE_SKIP_BUNDLE + fi + if [[ -n "${SYFTBOX_HOTLINK_DEBUG:-}" ]]; then + export SYFTBOX_HOTLINK_DEBUG + fi + if [[ -n "${SYQURE_DEBUG:-}" ]]; then + export SYQURE_DEBUG + fi + info "Syqure UI env: HOTLINK=${BV_SYFTBOX_HOTLINK:-unset} SOCKET_ONLY=${BV_SYFTBOX_HOTLINK_SOCKET_ONLY:-unset} 
QUIC=${BV_SYFTBOX_HOTLINK_QUIC:-unset} QUIC_ONLY=${BV_SYFTBOX_HOTLINK_QUIC_ONLY:-unset} SKIP_BUNDLE=${SYQURE_SKIP_BUNDLE:-unset}" + + start_static_server + start_tauri_instances + + # Mirror syqure-distributed mode/transport selection for secure-aggregate runtime. + MODE="${BV_SYQURE_AGG_MODE:-smpc}" + TRANSPORT="${BV_SYQURE_TRANSPORT:-hotlink}" + MODULE_YAML="$ROOT_DIR/biovault/tests/scenarios/syqure-flow/modules/secure-aggregate/module.yaml" + case "$MODE" in + he) ENTRY="he_aggregate.codon" ;; + smpc|"") ENTRY="smpc_aggregate.codon" ;; + *) + error "Unknown BV_SYQURE_AGG_MODE: $MODE (expected smpc|he)" + exit 1 + ;; + esac + python3 -c "import pathlib,re; path = pathlib.Path(r'${MODULE_YAML}'); text = path.read_text(); text = text.replace('entrypoint: smpc_aggregate.codon', f'entrypoint: ${ENTRY}'); text = text.replace('entrypoint: he_aggregate.codon', f'entrypoint: ${ENTRY}'); text = re.sub(r'transport: .*', f'transport: ${TRANSPORT}', text); path.write_text(text)" + info "Syqure aggregation mode: ${MODE} (entrypoint: ${ENTRY}) transport: ${TRANSPORT}" + + info "=== Syqure Multiparty Flow Test ===" + info "Three clients will execute biovault/tests/scenarios/syqure-flow/flow.yaml via collaborative run." + if [[ "$SYQURE_MULTIPARTY_SECURE_ONLY" == "1" ]]; then + info "Secure-only mode: running only secure_aggregate with seeded fixed inputs." + fi + if [[ "$SYQURE_MULTIPARTY_CLI_PARITY" == "1" ]]; then + info "CLI-parity mode: backend=${BV_SYFTBOX_BACKEND:-unset} BV_SYQURE_TCP_PROXY=${BV_SYQURE_TCP_PROXY:-unset} BV_SYQURE_PORT_BASE=${BV_SYQURE_PORT_BASE:-auto}." 
+ fi + info "" + info "Open these URLs in your browser:" + info " Client1: ${UI_BASE_URL}?ws=${WS_PORT_BASE}&real=1" + info " Client2: ${UI_BASE_URL}?ws=$((WS_PORT_BASE + 1))&real=1" + info " Aggregator: ${UI_BASE_URL}?ws=$((WS_PORT_BASE + 2))&real=1" + info "" + info "Emails: ${CLIENT1_EMAIL}, ${CLIENT2_EMAIL}, ${AGG_EMAIL}" + + timer_push "Playwright: @syqure-multiparty-flow" + start_syqure_watchdog "syqure-flow" + run_ui_grep "@syqure-multiparty-flow" "INTERACTIVE_MODE=$INTERACTIVE_MODE" "SYQURE_MULTIPARTY_SECURE_ONLY=$SYQURE_MULTIPARTY_SECURE_ONLY" "SYQURE_MULTIPARTY_CLI_PARITY=$SYQURE_MULTIPARTY_CLI_PARITY" + stop_syqure_watchdog + timer_pop + + if [[ "$WAIT_MODE" == "1" ]]; then + info "Wait mode: Servers will stay running. Press Ctrl+C to exit." + while true; do sleep 1; done + fi + ;; + syqure-multiparty-allele-freq) + # Keep Syqure runtime env aligned with the distributed scenario defaults. + # Allow callers to override explicitly via environment. + export BV_AGG_THREADS="${BV_AGG_THREADS:-2}" + export BV_EMIT_MAX_FORKS="${BV_EMIT_MAX_FORKS:-2}" + export BV_SYFTBOX_HOTLINK="${BV_SYFTBOX_HOTLINK:-1}" + export BV_SYFTBOX_HOTLINK_SOCKET_ONLY="${BV_SYFTBOX_HOTLINK_SOCKET_ONLY:-1}" + export BV_SYFTBOX_HOTLINK_TCP_PROXY="${BV_SYFTBOX_HOTLINK_TCP_PROXY:-1}" + export BV_SYFTBOX_HOTLINK_QUIC="${BV_SYFTBOX_HOTLINK_QUIC:-1}" + export BV_SYFTBOX_HOTLINK_QUIC_ONLY="${BV_SYFTBOX_HOTLINK_QUIC_ONLY:-0}" + export SYFTBOX_HOTLINK="${SYFTBOX_HOTLINK:-$BV_SYFTBOX_HOTLINK}" + export SYFTBOX_HOTLINK_SOCKET_ONLY="${SYFTBOX_HOTLINK_SOCKET_ONLY:-$BV_SYFTBOX_HOTLINK_SOCKET_ONLY}" + export SYFTBOX_HOTLINK_TCP_PROXY="${SYFTBOX_HOTLINK_TCP_PROXY:-$BV_SYFTBOX_HOTLINK_TCP_PROXY}" + export SYFTBOX_HOTLINK_QUIC="${SYFTBOX_HOTLINK_QUIC:-$BV_SYFTBOX_HOTLINK_QUIC}" + export SYFTBOX_HOTLINK_QUIC_ONLY="${SYFTBOX_HOTLINK_QUIC_ONLY:-$BV_SYFTBOX_HOTLINK_QUIC_ONLY}" + if [[ -n "${SYQURE_SKIP_BUNDLE:-}" ]]; then + export SYQURE_SKIP_BUNDLE + fi + if [[ -n "${SYFTBOX_HOTLINK_DEBUG:-}" ]]; then + 
export SYFTBOX_HOTLINK_DEBUG + fi + if [[ -n "${SYQURE_DEBUG:-}" ]]; then + export SYQURE_DEBUG + fi + info "Syqure UI env: HOTLINK=${BV_SYFTBOX_HOTLINK:-unset} SOCKET_ONLY=${BV_SYFTBOX_HOTLINK_SOCKET_ONLY:-unset} QUIC=${BV_SYFTBOX_HOTLINK_QUIC:-unset} QUIC_ONLY=${BV_SYFTBOX_HOTLINK_QUIC_ONLY:-unset} SKIP_BUNDLE=${SYQURE_SKIP_BUNDLE:-unset}" + info "Allele-freq exec env: BV_AGG_THREADS=${BV_AGG_THREADS} BV_EMIT_MAX_FORKS=${BV_EMIT_MAX_FORKS}" + if [[ -n "${BV_ALLELE_FREQ_FORCE_ARRAY_LENGTH:-}" ]]; then + info "Allele-freq debug override: BV_ALLELE_FREQ_FORCE_ARRAY_LENGTH=${BV_ALLELE_FREQ_FORCE_ARRAY_LENGTH}" + fi + + start_static_server + start_tauri_instances + prepare_allele_freq_ui_inputs + + MODE="${BV_SYQURE_AGG_MODE:-smpc}" + TRANSPORT="${BV_SYQURE_TRANSPORT:-hotlink}" + MODULE_YAML="$ROOT_DIR/biovault/flows/multiparty-allele-freq/modules/secure-aggregate/module.yaml" + case "$MODE" in + smpc|"") ENTRY="secure_aggregate.codon" ;; + he) + error "BV_SYQURE_AGG_MODE=he is not supported for multiparty-allele-freq (only smpc)" + exit 1 + ;; + *) + error "Unknown BV_SYQURE_AGG_MODE: $MODE (expected smpc|he)" + exit 1 + ;; + esac + python3 -c "import pathlib,re; path = pathlib.Path(r'${MODULE_YAML}'); text = path.read_text(); text = re.sub(r'entrypoint: [A-Za-z0-9_]+\\.codon', f'entrypoint: ${ENTRY}', text); text = re.sub(r'transport: .*', f'transport: ${TRANSPORT}', text); path.write_text(text)" + info "Syqure aggregation mode: ${MODE} (entrypoint: ${ENTRY}) transport: ${TRANSPORT}" + + info "=== Syqure Multiparty Allele-Freq Flow Test ===" + info "Three clients will execute biovault/flows/multiparty-allele-freq/flow.yaml via collaborative run." 
+ info "" + info "Open these URLs in your browser:" + info " Client1: ${UI_BASE_URL}?ws=${WS_PORT_BASE}&real=1" + info " Client2: ${UI_BASE_URL}?ws=$((WS_PORT_BASE + 1))&real=1" + info " Aggregator: ${UI_BASE_URL}?ws=$((WS_PORT_BASE + 2))&real=1" + info "" + info "Emails: ${CLIENT1_EMAIL}, ${CLIENT2_EMAIL}, ${AGG_EMAIL}" + + timer_push "Playwright: @syqure-multiparty-allele-freq" + start_syqure_watchdog "multiparty-allele-freq" + run_ui_grep "@syqure-multiparty-allele-freq" "INTERACTIVE_MODE=$INTERACTIVE_MODE" + stop_syqure_watchdog + timer_pop + + if [[ "$WAIT_MODE" == "1" ]]; then + info "Wait mode: Servers will stay running. Press Ctrl+C to exit." + while true; do sleep 1; done + fi + ;; file-transfer) start_static_server start_tauri_instances diff --git a/tests/ui/messages-multiparty-flow.spec.ts b/tests/ui/messages-multiparty-flow.spec.ts new file mode 100644 index 00000000..d968fd2c --- /dev/null +++ b/tests/ui/messages-multiparty-flow.spec.ts @@ -0,0 +1,1692 @@ +/** + * Multiparty Flow Test (Three Clients) + * Tests the multiparty flow invitation and step-by-step execution: + * 1. Three clients (client1, client2, aggregator) onboard + * 2. All three exchange keys + * 3. Aggregator creates a group chat and sends flow invitation + * 4. Client1 and client2 receive invitation, import flow, and join + * 5. Each participant goes to Runs tab and executes their steps + * 6. 
Shared results appear in the chat + * + * Usage: + * ./test-scenario.sh --pipelines-multiparty-flow --interactive + * + * @tag pipelines-multiparty-flow + */ +import { expect, test, type Page } from './playwright-fixtures' +import WebSocket from 'ws' +import * as fs from 'node:fs' +import { waitForAppReady, ensureProfileSelected } from './test-helpers.js' +import { setWsPort, completeOnboarding, ensureLogSocket, log } from './onboarding-helper.js' + +const TEST_TIMEOUT = 300_000 // 5 minutes max +const UI_TIMEOUT = 15_000 +const MESSAGE_TIMEOUT = 90_000 +const SYNC_INTERVAL = 500 + +test.describe.configure({ timeout: TEST_TIMEOUT }) + +interface Backend { + invoke: (cmd: string, args?: Record, timeoutMs?: number) => Promise + close: () => Promise +} + +async function connectBackend(port: number): Promise { + const socket = new WebSocket(`ws://localhost:${port}`) + await new Promise((resolve, reject) => { + const timeout = setTimeout( + () => reject(new Error(`WS connect timeout on port ${port}`)), + 10_000, + ) + socket.once('open', () => { + clearTimeout(timeout) + resolve() + }) + socket.once('error', (err) => { + clearTimeout(timeout) + reject(err) + }) + }) + + let nextId = 0 + const pending = new Map void; reject: (e: any) => void }>() + + socket.on('message', (data) => { + let parsed: any + try { + parsed = JSON.parse(data.toString()) + } catch { + return + } + const entry = pending.get(parsed?.id) + if (!entry) return + pending.delete(parsed.id) + if (parsed.error) entry.reject(new Error(parsed.error)) + else entry.resolve(parsed.result) + }) + + function invoke(cmd: string, args: Record = {}, timeoutMs = 30_000) { + const id = ++nextId + socket.send(JSON.stringify({ id, cmd, args })) + return new Promise((resolve, reject) => { + pending.set(id, { resolve, reject }) + setTimeout(() => { + if (!pending.has(id)) return + pending.delete(id) + reject(new Error(`WS invoke timeout: ${cmd}`)) + }, timeoutMs) + }) + } + + async function close() { + if 
(socket.readyState !== WebSocket.OPEN) return + await new Promise((resolve) => { + socket.once('close', () => resolve()) + socket.close() + }) + } + + return { invoke, close } +} + +function normalizeMetadata(metadata: any): any { + if (!metadata) return null + if (typeof metadata === 'string') { + try { + return JSON.parse(metadata) + } catch { + return null + } + } + return metadata +} + +async function waitForThreadMessageMatching( + backend: Backend, + threadId: string, + predicate: (msg: any) => boolean, + label: string, + timeoutMs = MESSAGE_TIMEOUT, +): Promise { + const start = Date.now() + while (Date.now() - start < timeoutMs) { + try { + await backend.invoke('sync_messages_with_failures') + } catch { + // Ignore transient sync failures during polling + } + const msgs = await backend.invoke('get_thread_messages', { threadId }) + if (Array.isArray(msgs)) { + const found = msgs.find((msg: any) => predicate(msg)) + if (found) return found + } + await new Promise((r) => setTimeout(r, SYNC_INTERVAL)) + } + throw new Error(`Timed out waiting for thread message: ${label}`) +} + +async function importContactWithRetry( + backend: Backend, + identity: string, + label: string, + timeoutMs = 60_000, +) { + const start = Date.now() + let lastErr: any = null + while (Date.now() - start < timeoutMs) { + try { + await backend.invoke('network_import_contact', { identity }) + return + } catch (err: any) { + lastErr = err + const msg = String(err?.message || err || '') + if (!msg.includes('DID not found')) { + throw err + } + await backend.invoke('trigger_syftbox_sync').catch(() => {}) + await new Promise((r) => setTimeout(r, 1000)) + } + } + throw new Error(`${label}: timed out importing ${identity}. 
last error: ${lastErr}`) +} + +function extractReadPrincipalsFromSyftPub(content: string): string[] { + const lines = content.split(/\r?\n/) + const principals: string[] = [] + let inReadBlock = false + + for (const line of lines) { + const trimmed = line.trim() + if (/^read:\s*$/.test(trimmed)) { + inReadBlock = true + continue + } + if (!inReadBlock) continue + if (/^[a-zA-Z_]+:\s*$/.test(trimmed) && !trimmed.startsWith('-')) { + break + } + const match = trimmed.match(/^-+\s*(.+)$/) + if (match && match[1]) { + principals.push(match[1].trim()) + } + } + + return [...new Set(principals)] +} + +type ViewerContext = { + label: string + backend: Backend +} + +async function waitForViewerCondition( + viewer: ViewerContext, + label: string, + check: () => Promise, + timeoutMs = 60_000, +): Promise { + const start = Date.now() + let lastError = '' + + while (Date.now() - start < timeoutMs) { + const result = await check() + if (!result) { + console.log(` ✓ Sync check passed: ${label}`) + return + } + lastError = result + await new Promise((r) => setTimeout(r, SYNC_INTERVAL)) + } + + throw new Error(`Timed out waiting for sync condition "${label}": ${lastError}`) +} + +test.describe('Multiparty flow between three clients @pipelines-multiparty-flow', () => { + test('three clients execute a multiparty flow with UI interactions', async ({ browser }) => { + const wsPortBase = Number.parseInt(process.env.DEV_WS_BRIDGE_PORT_BASE || '3333', 10) + const wsPort1 = wsPortBase + const wsPort2 = wsPortBase + 1 + const wsPort3 = wsPortBase + 2 + + const expectedEmail1 = process.env.CLIENT1_EMAIL || 'client1@sandbox.local' + const expectedEmail2 = process.env.CLIENT2_EMAIL || 'client2@sandbox.local' + const expectedEmail3 = process.env.AGG_EMAIL || 'aggregator@sandbox.local' + + const logSocket = await ensureLogSocket() + log(logSocket, { + event: 'multiparty-flow-start', + email1: expectedEmail1, + email2: expectedEmail2, + email3: expectedEmail3, + wsPort1, + wsPort2, + wsPort3, + 
}) + + console.log('=== Multiparty Flow Test ===') + console.log(`Client 1 (contributor1): ${expectedEmail1} (WS port ${wsPort1})`) + console.log(`Client 2 (contributor2): ${expectedEmail2} (WS port ${wsPort2})`) + console.log(`Client 3 (aggregator): ${expectedEmail3} (WS port ${wsPort3})`) + + // Create pages for all three clients + const page1 = await browser.newPage() + const page2 = await browser.newPage() + const page3 = await browser.newPage() + + // Add console listeners to capture browser logs + page1.on('console', (msg) => { + if (msg.text().includes('[Flow Import]') || msg.type() === 'error') { + console.log(`[Browser1] ${msg.type()}: ${msg.text()}`) + } + }) + page2.on('console', (msg) => { + if (msg.text().includes('[Flow Import]') || msg.type() === 'error') { + console.log(`[Browser2] ${msg.type()}: ${msg.text()}`) + } + }) + page3.on('console', (msg) => { + if (msg.text().includes('[Flow Import]') || msg.type() === 'error') { + console.log(`[Browser3] ${msg.type()}: ${msg.text()}`) + } + }) + + await setWsPort(page1, wsPort1) + await setWsPort(page2, wsPort2) + await setWsPort(page3, wsPort3) + + // Connect backends + console.log('\n--- Connecting backends ---') + const backend1 = await connectBackend(wsPort1) + const backend2 = await connectBackend(wsPort2) + const backend3 = await connectBackend(wsPort3) + console.log('All backends connected') + + // Navigate to UI + const uiBaseUrl = process.env.UI_BASE_URL || 'http://localhost:8082' + await page1.goto(uiBaseUrl) + await page2.goto(uiBaseUrl) + await page3.goto(uiBaseUrl) + + // Complete onboarding for all three + console.log('\n--- Onboarding ---') + await completeOnboarding(page1, expectedEmail1, logSocket) + await completeOnboarding(page2, expectedEmail2, logSocket) + await completeOnboarding(page3, expectedEmail3, logSocket) + console.log('All clients onboarded') + + // Ensure dev mode is available + await backend1.invoke('get_dev_mode_info') + await backend2.invoke('get_dev_mode_info') + await 
backend3.invoke('get_dev_mode_info') + + const settings1 = await backend1.invoke('get_settings') + const settings2 = await backend2.invoke('get_settings') + const settings3 = await backend3.invoke('get_settings') + const email1 = String(settings1?.email || '').trim() + const email2 = String(settings2?.email || '').trim() + const email3 = String(settings3?.email || '').trim() + if (!email1 || !email2 || !email3) { + throw new Error( + `Missing runtime emails from settings: ${JSON.stringify({ email1, email2, email3 })}`, + ) + } + console.log( + `Resolved runtime identities from settings: client1=${email1}, client2=${email2}, aggregator=${email3}`, + ) + + const viewers: ViewerContext[] = [ + { label: email1, backend: backend1 }, + { label: email2, backend: backend2 }, + { label: email3, backend: backend3 }, + ] + + // === Key Exchange Phase === + console.log('\n--- Key Exchange Phase ---') + console.log('Each client importing the other two as contacts...') + + await importContactWithRetry(backend1, email2, email1) + await importContactWithRetry(backend1, email3, email1) + await importContactWithRetry(backend2, email1, email2) + await importContactWithRetry(backend2, email3, email2) + await importContactWithRetry(backend3, email1, email3) + await importContactWithRetry(backend3, email2, email3) + console.log('All key exchanges complete!') + + log(logSocket, { event: 'key-exchange-complete' }) + + // === Create Flow Invitation === + console.log('\n--- Creating Flow Invitation ---') + const timestamp = Date.now() + const flowName = 'multiparty' + const fixtureModuleName = `invite-fixture-${timestamp}` + const fixtureModuleDirName = fixtureModuleName + const fixtureAssetRelativePath = 'assets/fixture-data.txt' + const fixtureAssetContent = `fixture asset for ${fixtureModuleName}` + let sessionId = '' + + // Ensure aggregator has a multiparty flow available in local Flows before opening modal. 
+ const flowSpec = { + apiVersion: 'syftbox.openmined.org/v1alpha1', + kind: 'Flow', + metadata: { + name: flowName, + version: '0.1.0', + }, + spec: { + vars: { + flow_path: 'syft://{datasite.current}/shared/flows/{flow_name}', + run_path: '{vars.flow_path}/{run_id}', + step_path: '{vars.run_path}/{step.number}-{step.id}', + }, + coordination: { + url: '{vars.run_path}/_progress', + share_with: 'all', + }, + datasites: { + all: [email3, email1, email2], + groups: { + aggregator: { include: [email3] }, + clients: { include: [email1, email2] }, + }, + }, + steps: [ + { + id: 'generate', + name: 'Generate Numbers', + description: 'Generate random numbers locally', + run: { targets: 'clients', strategy: 'parallel' }, + share: { + numbers_shared: { + source: 'self.outputs.numbers', + url: '{vars.step_path}/numbers.json', + permissions: { read: [email3] }, + }, + }, + }, + { + id: 'contributions_ready', + name: 'Wait for Contributions', + description: 'Wait for all contributors to share', + barrier: { + wait_for: 'generate', + targets: 'clients', + timeout: 300, + }, + }, + { + id: 'aggregate', + name: 'Aggregate Sum', + description: 'Compute sum of all contributions', + run: { targets: 'aggregator' }, + depends_on: ['contributions_ready'], + share: { + result_shared: { + source: 'self.outputs.result', + url: '{vars.step_path}/result.json', + permissions: { read: [email1, email2, email3] }, + }, + }, + }, + ], + }, + } + + await backend3.invoke( + 'import_flow_from_json', + { + request: { + name: flowName, + flow_json: flowSpec, + overwrite: true, + }, + }, + 60_000, + ) + + // Add a fixture module + asset to the flow folder so invitation source sync/import + // can be validated for bundled module files. 
+ const proposerFlows = await backend3.invoke('get_flows') + const proposerFlow = (proposerFlows || []).find( + (f: any) => + String(f?.name || f?.metadata?.name || '') + .trim() + .toLowerCase() === flowName.toLowerCase(), + ) + const proposerFlowPath = String(proposerFlow?.flow_path || '').trim() + expect(proposerFlowPath).toBeTruthy() + + const fixtureModulePath = `${proposerFlowPath}/modules/${fixtureModuleDirName}` + fs.mkdirSync(`${fixtureModulePath}/assets`, { recursive: true }) + fs.writeFileSync( + `${fixtureModulePath}/module.yaml`, + [ + 'apiVersion: syftbox.openmined.org/v1alpha1', + 'kind: Module', + 'metadata:', + ` name: ${fixtureModuleName}`, + ' version: 0.1.0', + 'spec:', + ' runner:', + ' kind: shell', + ' template: shell', + ' entrypoint: run.sh', + ' assets:', + ` - path: ${fixtureAssetRelativePath}`, + ' outputs:', + ' - name: done', + ' type: File', + ' format: { kind: txt }', + ' path: done.txt', + '', + ].join('\n'), + ) + fs.writeFileSync(`${fixtureModulePath}/run.sh`, '#!/usr/bin/env bash\necho done > done.txt\n') + fs.writeFileSync( + `${fixtureModulePath}/${fixtureAssetRelativePath}`, + `${fixtureAssetContent}\n`, + ) + + // Bootstrap a group thread first so Propose Flow has all participants in-context. 
+ const bootstrap = await backend3.invoke('send_message', { + request: { + recipients: [email1, email2], + body: `bootstrap thread ${timestamp}`, + subject: 'Multiparty bootstrap', + }, + }) + const threadId = bootstrap.thread_id + + // === Navigate to Messages and View Invitation in UI === + console.log('\n--- Navigating to Messages UI ---') + + async function navigateToMessagesAndFindThread(page: Page, label: string) { + await page.click('button:has-text("Messages")') + await page.waitForTimeout(500) + + console.log(` ${label}: Syncing messages...`) + const syncBtn = page.locator('#refresh-messages-btn') + if (await syncBtn.isVisible()) { + await syncBtn.click() + await page.waitForTimeout(2000) + } + + const threadItem = page.locator('.message-thread-item').first() + await threadItem.waitFor({ timeout: UI_TIMEOUT }) + await threadItem.click() + await page.waitForTimeout(250) + console.log(` ${label}: Thread selected`) + } + + // Navigate all three clients to Messages + console.log('Navigating client1 to Messages...') + await navigateToMessagesAndFindThread(page1, email1) + + console.log('Navigating client2 to Messages...') + await navigateToMessagesAndFindThread(page2, email2) + + console.log('Navigating aggregator to Messages...') + await navigateToMessagesAndFindThread(page3, email3) + + // Aggregator creates flow invitation using the actual Propose Flow modal UI. 
+ console.log('\n--- Creating Flow Invitation via UI Modal ---') + const proposeBtn = page3.locator('#propose-flow-btn') + await proposeBtn.waitFor({ timeout: UI_TIMEOUT }) + await expect(proposeBtn).toBeVisible() + await proposeBtn.click() + + const proposeModal = page3.locator('#propose-flow-modal') + await proposeModal.waitFor({ timeout: UI_TIMEOUT }) + await page3.selectOption('#propose-flow-select', { label: flowName }) + await page3.waitForTimeout(500) + + const roleRows = page3.locator('#propose-flow-roles-list .propose-flow-role-row') + const roleCount = await roleRows.count() + console.log(` Aggregator: role rows in modal = ${roleCount}`) + expect(roleCount).toBe(3) + + // Assign roles by label content with unique participant mapping. + const allCandidates = [email1, email2, email3] + const usedEmails = new Set() + for (let i = 0; i < roleCount; i += 1) { + const row = roleRows.nth(i) + const roleLabel = ( + (await row.locator('.propose-flow-role-label').textContent().catch(() => '')) || '' + ) + .toLowerCase() + .trim() + const select = roleRows.nth(i).locator('select') + let preferred = '' + if (roleLabel.includes('aggregator')) { + preferred = email3 + } else if (roleLabel.includes('client') || roleLabel.includes('contributor')) { + if (roleLabel.includes('1')) { + preferred = email1 + } else if (roleLabel.includes('2')) { + preferred = email2 + } + } + const selectedEmail = + (preferred && !usedEmails.has(preferred) ? preferred : '') || + allCandidates.find((candidate) => !usedEmails.has(candidate)) || + preferred || + email1 + await select.selectOption(selectedEmail) + usedEmails.add(selectedEmail) + } + + await page3 + .locator('#propose-flow-message') + .fill(`Join me in a multiparty flow! 
Flow: ${flowName} - ${timestamp}`) + const sendBtn = page3.locator('#propose-flow-send-btn') + await sendBtn.waitFor({ timeout: UI_TIMEOUT }) + await expect + .poll(async () => { + try { + return await sendBtn.isEnabled() + } catch { + return false + } + }, { timeout: UI_TIMEOUT }) + .toBe(true) + await sendBtn.click({ timeout: UI_TIMEOUT }) + let modalClosed = false + try { + await expect(proposeModal).toBeHidden({ timeout: 6000 }) + modalClosed = true + } catch { + console.log(' Aggregator: modal still open after click, using JS send fallback') + await page3.evaluate(() => window.proposeFlowModal?.sendInvitation?.()) + await expect(proposeModal).toBeHidden({ timeout: UI_TIMEOUT }) + modalClosed = true + } + expect(modalClosed).toBe(true) + await page3.waitForTimeout(1500) + + const invitationForClient1 = await waitForThreadMessageMatching( + backend1, + threadId, + (msg) => normalizeMetadata(msg?.metadata)?.flow_invitation?.flow_name === flowName, + 'flow invitation message (from UI modal)', + ) + const invitationMeta = normalizeMetadata(invitationForClient1?.metadata)?.flow_invitation + sessionId = invitationMeta?.session_id || invitationMeta?.sessionId || '' + const invitationFlowLocation = String(invitationMeta?.flow_location || '').trim() + expect(sessionId).toBeTruthy() + expect(invitationFlowLocation).toBeTruthy() + + console.log(`Flow invitation sent via UI! Thread ID: ${threadId}, Session ID: ${sessionId}`) + log(logSocket, { event: 'flow-invitation-sent', sessionId, threadId }) + + // Wait for invitation cards to render + console.log('\n--- Waiting for Flow Invitation Cards ---') + await page1.waitForTimeout(2000) + await page2.waitForTimeout(2000) + + // === Import and Join Flow via UI === + console.log('\n--- Import and Join Flow via UI ---') + + async function importAndJoinFlow(page: Page, label: string, backend: Backend) { + // Ensure thread view includes both inbound/outbound messages. 
+ const allFilterBtn = page.locator('.message-filter[data-filter="all"]') + if (await allFilterBtn.isVisible().catch(() => false)) { + await allFilterBtn.click().catch(() => {}) + await page.waitForTimeout(250) + } + const refreshBtn = page.locator('#refresh-messages-btn') + if (await refreshBtn.isVisible().catch(() => false)) { + await refreshBtn.click().catch(() => {}) + await page.waitForTimeout(1200) + } + + // Wait for invitation card + const invitationCard = page + .locator('#messages-main:visible #message-conversation .flow-invitation-card:visible') + .first() + try { + await invitationCard.waitFor({ timeout: UI_TIMEOUT }) + console.log(` ${label}: Found visible invitation card`) + } catch { + // Sender can already be joined and only have a View button available; accept that path. + const directOpenBtn = page + .locator( + '#messages-main:visible button:has-text("View Flow"), #messages-main:visible button:has-text("Join Flow")', + ) + .first() + if (await directOpenBtn.isVisible().catch(() => false)) { + console.log(` ${label}: Invitation card not visible; using direct flow button`) + await directOpenBtn.click() + await page.waitForTimeout(800) + return + } + throw new Error(`${label}: flow invitation UI not visible in message thread`) + } + + const importBtn = invitationCard.locator( + '.flow-invitation-btn.import-btn, button:has-text("Import Flow")', + ) + const syncBtn = invitationCard.locator( + '.flow-invitation-btn:has-text("Sync Flow Files"), button:has-text("Sync Flow Files")', + ) + const showFilesBtn = invitationCard.locator( + '.flow-invitation-btn:has-text("Show Flow Files"), button:has-text("Show Flow Files")', + ) + const joinBtn = invitationCard.locator( + '.flow-invitation-btn.view-runs-btn, button:has-text("Join Flow"), button:has-text("View Flow")', + ) + const statusEl = invitationCard.locator('.flow-invitation-status') + if (await importBtn.isVisible({ timeout: 5000 }).catch(() => false)) { + const joinInitiallyVisible = await 
joinBtn.isVisible().catch(() => false) + console.log(` ${label}: Join button visible before import: ${joinInitiallyVisible}`) + expect(joinInitiallyVisible).toBe(false) + + // Explicitly validate Sync/Show actions before import. + await syncBtn.waitFor({ timeout: UI_TIMEOUT }) + await syncBtn.click() + await expect + .poll( + async () => ((await statusEl.textContent().catch(() => '')) || '').trim(), + { timeout: 45_000 }, + ) + .toContain('Flow files synced and ready to import') + + await showFilesBtn.waitFor({ timeout: UI_TIMEOUT }) + await showFilesBtn.click() + await expect + .poll( + async () => ((await statusEl.textContent().catch(() => '')) || '').trim(), + { timeout: 20_000 }, + ) + .toContain('Opened:') + + // Ensure synced source includes fixture module + asset. + const localFlowSourcePath = await backend.invoke('resolve_syft_url_to_local_path', { + syftUrl: invitationFlowLocation, + }) + expect(localFlowSourcePath).toBeTruthy() + await expect + .poll( + async () => + await backend.invoke('path_exists', { + path: `${localFlowSourcePath}/flow.yaml`, + }), + { timeout: 45_000 }, + ) + .toBe(true) + await expect + .poll( + async () => + await backend.invoke('path_exists', { + path: `${localFlowSourcePath}/modules/${fixtureModuleDirName}/module.yaml`, + }), + { timeout: 45_000 }, + ) + .toBe(true) + await expect + .poll( + async () => + await backend.invoke('path_exists', { + path: `${localFlowSourcePath}/modules/${fixtureModuleDirName}/${fixtureAssetRelativePath}`, + }), + { timeout: 45_000 }, + ) + .toBe(true) + + // Click Import Flow button + await importBtn.click() + console.log(` ${label}: Clicked "Import Flow"`) + + await expect + .poll( + async () => ((await statusEl.textContent().catch(() => '')) || '').trim(), + { timeout: 60_000 }, + ) + .toContain('Flow imported') + + // Check if import button changed + const importBtnText = await importBtn.textContent().catch(() => '') + console.log(` ${label}: Import button text after click: ${importBtnText}`) + + 
// Ensure fixture module got imported (with asset) into local modules. + await expect + .poll( + async () => { + const modules = await backend.invoke('get_modules') + return (modules || []).find((m: any) => m?.name === fixtureModuleName) || null + }, + { timeout: 60_000 }, + ) + .not.toBeNull() + const importedModules = await backend.invoke('get_modules') + const importedFixture = (importedModules || []).find( + (m: any) => m?.name === fixtureModuleName, + ) + expect(importedFixture?.module_path).toBeTruthy() + await expect + .poll( + async () => + await backend.invoke('path_exists', { + path: `${importedFixture.module_path}/${fixtureAssetRelativePath}`, + }), + { timeout: 30_000 }, + ) + .toBe(true) + const importedAssetText = fs + .readFileSync( + `${importedFixture.module_path}/${fixtureAssetRelativePath}`, + 'utf-8', + ) + .trim() + expect(importedAssetText).toContain(fixtureAssetContent) + } else { + console.log(` ${label}: Import button not visible (flow may already be imported)`) + } + + // Click Join Flow button + const joinBtnVisible = await joinBtn.isVisible({ timeout: 5000 }).catch(() => false) + console.log(` ${label}: Join button visible: ${joinBtnVisible}`) + if (!joinBtnVisible) { + // Debug: check what buttons are visible + const allButtons = await invitationCard.locator('button').allTextContents() + console.log(` ${label}: All buttons in invitation card: ${JSON.stringify(allButtons)}`) + } + await joinBtn.waitFor({ timeout: UI_TIMEOUT }) + await joinBtn.click() + console.log(` ${label}: Clicked "Join Flow"`) + await page.waitForTimeout(1500) + + // Verify post-join state. + // UI can show either "View Flow" or keep "Join Flow" depending on role/timing, + // so assert using both button text and card state. 
+ const joinBtnText = ((await joinBtn.textContent().catch(() => '')) || '').trim() + console.log(` ${label}: Join button text after join: "${joinBtnText}"`) + const joinLooksCompleted = + joinBtnText.includes('View Flow') || joinBtnText.includes('Join Flow') + expect(joinLooksCompleted).toBe(true) + + // Verify Decline button is hidden after joining (authoritative signal). + const declineBtn = invitationCard.locator('.decline-btn') + const declineVisible = await declineBtn.isVisible().catch(() => false) + console.log(` ${label}: Decline button visible after join: ${declineVisible}`) + expect(declineVisible).toBe(false) + } + + // Client 1 imports and joins + console.log(`${email1} importing and joining flow...`) + await importAndJoinFlow(page1, email1, backend1) + + // Client 2 imports and joins + console.log(`${email2} importing and joining flow...`) + await importAndJoinFlow(page2, email2, backend2) + + // Aggregator also imports and joins via UI + console.log(`${email3} importing and joining flow...`) + await importAndJoinFlow(page3, email3, backend3) + + log(logSocket, { event: 'all-joined' }) + + console.log('\n--- Verifying Initial Shared Coordination Sync ---') + for (const viewer of viewers) { + const initialProgress = await viewer.backend.invoke('get_all_participant_progress', { + sessionId, + }) + expect(Array.isArray(initialProgress)).toBe(true) + expect((initialProgress || []).length).toBeGreaterThanOrEqual(3) + } + + // === Navigate to Runs Tab and Execute Steps === + console.log('\n--- Navigating to Runs Tab ---') + + async function navigateToRuns(page: Page, label: string) { + await page.click('button:has-text("Runs")') + await page.waitForTimeout(1000) + console.log(` ${label}: On Runs tab`) + } + + // All clients navigate to Runs + await navigateToRuns(page1, email1) + await navigateToRuns(page2, email2) + await navigateToRuns(page3, email3) + + // === Verify Multiparty Run Cards === + console.log('\n--- Verifying Multiparty Run Cards ---') + + 
async function verifyMultipartyRun(page: Page, label: string) { + // Look for run card with multiparty indicator + const runCard = page.locator('.flow-run-card').first() + await runCard.waitFor({ timeout: UI_TIMEOUT }) + + // Card should auto-expand as latest run, wait for steps to load + await page.waitForTimeout(1500) + + // Verify multiparty details are visible + const mpDetails = runCard.locator('.multiparty-details') + const isMultiparty = await mpDetails.isVisible().catch(() => false) + console.log(` ${label}: Multiparty details visible: ${isMultiparty}`) + expect(isMultiparty).toBe(true) + + // Verify participants section + const participants = runCard.locator('.mp-participants .mp-participant') + const participantCount = await participants.count() + console.log(` ${label}: Found ${participantCount} participants`) + expect(participantCount).toBe(3) + + // Verify steps section loaded + const steps = runCard.locator('.mp-steps-list .mp-step') + const stepCount = await steps.count() + console.log(` ${label}: Found ${stepCount} steps`) + expect(stepCount).toBe(3) + + // Verify progress bar exists + const progressBar = runCard.locator('.mp-progress-bar') + expect(await progressBar.isVisible()).toBe(true) + + return runCard + } + + const runCard1 = await verifyMultipartyRun(page1, email1) + const runCard2 = await verifyMultipartyRun(page2, email2) + const runCard3 = await verifyMultipartyRun(page3, email3) + + // === Verify Initial Button State === + console.log('\n--- Verifying Initial Button Visibility ---') + + // Contributors should see Run button for 'generate' (first step, Ready) + await verifyRunButtonVisibility(page1, 'generate', true, email1) + await verifyRunButtonVisibility(page2, 'generate', true, email2) + + // Aggregator should NOT see Run button for 'aggregate' (WaitingForInputs) + await verifyRunButtonVisibility(page3, 'aggregate', false, email3) + + // Initial progress should be 0/3 steps complete (total flow progress) + const initialProgress1 = 
await verifyProgressInUI(page1, email1) + expect(initialProgress1).toContain('0/3 steps complete') + + const initialProgress3 = await verifyProgressInUI(page3, email3) + expect(initialProgress3).toContain('0/3 steps complete') + + // Run Next should be visible for contributors, hidden for aggregator until ready + await verifyRunNextButton(page1, true, 'Generate Numbers', email1) + await verifyRunNextButton(page2, true, 'Generate Numbers', email2) + await verifyRunNextButton(page3, false, null, email3) + + // Auto-run checkbox should persist to flow state + const autoToggle1 = page1.locator( + '.mp-step[data-step-id="generate"] .mp-auto-toggle input[type="checkbox"]', + ) + await autoToggle1.waitFor({ timeout: UI_TIMEOUT }) + await autoToggle1.check() + await page1.waitForTimeout(300) + const stateAfterAutoOn = await backend1.invoke('get_multiparty_flow_state', { sessionId }) + expect(stateAfterAutoOn?.steps?.find((s: any) => s.id === 'generate')?.auto_run).toBe(true) + await autoToggle1.uncheck() + await page1.waitForTimeout(300) + const stateAfterAutoOff = await backend1.invoke('get_multiparty_flow_state', { sessionId }) + expect(stateAfterAutoOff?.steps?.find((s: any) => s.id === 'generate')?.auto_run).toBe(false) + + // Aggregator aggregate step should not be ready before contributor shares + const initialAggState = await backend3.invoke('get_multiparty_flow_state', { sessionId }) + const initialAggStatus = initialAggState?.steps?.find((s: any) => s.id === 'aggregate')?.status + expect(['Pending', 'WaitingForInputs']).toContain(initialAggStatus) + + // Step should show participant chips for all 3 parties + const aggGenerateChips = page3.locator('.mp-step[data-step-id="generate"] .mp-participant-chip') + expect(await aggGenerateChips.count()).toBe(3) + const chipsText = (await aggGenerateChips.allTextContents()).join(' ') + expect(chipsText).toContain(email1) + expect(chipsText).toContain(email2) + expect(chipsText).toContain(email3) + + // Activity log should 
include join events + await verifyActivityLogContains(page3, email3, 'joined the flow', 20_000, backend3, sessionId) + + // === Execute Steps via UI === + console.log('\n--- Executing Flow Steps via Runs UI ---') + + async function ensureStepsTabActive(page: Page) { + const runCard = page.locator('.flow-run-card').first() + const stepsTab = runCard.locator('.mp-tab[data-tab="steps"]') + if (await stepsTab.isVisible().catch(() => false)) { + await stepsTab.click().catch(() => {}) + await runCard + .locator('.mp-tab-content[data-tab-content="steps"]') + .waitFor({ state: 'visible', timeout: UI_TIMEOUT }) + .catch(() => {}) + } + } + + async function ensureStepExpanded(page: Page, stepId: string) { + await ensureStepsTabActive(page) + const step = page.locator(`.mp-step[data-step-id="${stepId}"]`) + await step.waitFor({ timeout: UI_TIMEOUT }) + const isCollapsed = await step.evaluate((el) => el.classList.contains('collapsed')) + if (isCollapsed) { + await step.locator('.mp-step-toggle').click() + await page.waitForTimeout(150) + } + } + + async function runStepInUI(page: Page, stepId: string, label: string) { + const step = page.locator(`.mp-step[data-step-id="${stepId}"]`) + for (let attempt = 0; attempt < 12; attempt += 1) { + await ensureStepsTabActive(page) + if (!(await step.isVisible().catch(() => false))) { + await page.waitForTimeout(400) + continue + } + + await ensureStepExpanded(page, stepId) + + const statusClass = await step.getAttribute('class') + if ( + statusClass?.includes('mp-step-running') || + statusClass?.includes('mp-step-completed') || + statusClass?.includes('mp-step-shared') + ) { + console.log(` ${label}: ${stepId} already in progress/completed`) + return true + } + + const runBtn = step.locator('.mp-run-btn, button:has-text("Run")') + if (await runBtn.isVisible({ timeout: 1200 }).catch(() => false)) { + await runBtn.click() + console.log(` ${label}: Clicked Run for ${stepId}`) + await page.waitForTimeout(1000) + return true + } + + await 
page.waitForTimeout(500) + if (attempt === 5) { + // Mid-way nudge in headed runs where render can lag after many updates. + await page.click('button:has-text("Runs")').catch(() => {}) + await page.waitForTimeout(400) + } + } + + console.log(` ${label}: Run button for ${stepId} never became clickable`) + return false + } + + async function shareStepInUI(page: Page, stepId: string, label: string) { + const step = page.locator(`.mp-step[data-step-id="${stepId}"]`) + for (let attempt = 0; attempt < 12; attempt += 1) { + await ensureStepsTabActive(page) + if (!(await step.isVisible().catch(() => false))) { + await page.waitForTimeout(400) + continue + } + await ensureStepExpanded(page, stepId) + + const statusClass = await step.getAttribute('class') + if (statusClass?.includes('mp-step-shared')) { + console.log(` ${label}: ${stepId} already shared`) + return true + } + + const shareBtn = step.locator('.mp-share-btn, button:has-text("Share")') + if (await shareBtn.isVisible({ timeout: 1200 }).catch(() => false)) { + await shareBtn.click() + console.log(` ${label}: Clicked Share for ${stepId}`) + await page.waitForTimeout(1500) + return true + } + + await page.waitForTimeout(500) + } + console.log(` ${label}: Share button for ${stepId} never became clickable`) + return false + } + + // Helper to verify output files exist after running a step + async function verifyStepOutputFiles( + backend: Backend, + sessionIdVal: string, + stepId: string, + expectedFiles: string[], + label: string, + ) { + try { + const files = await backend.invoke('get_step_output_files', { + sessionId: sessionIdVal, + stepId: stepId, + }) + console.log(` ${label}: Output files for ${stepId}: ${JSON.stringify(files)}`) + + for (const expectedFile of expectedFiles) { + const found = files.some((f: string) => f.endsWith(expectedFile)) + if (found) { + console.log(` ${label}: ✓ Found expected output file: ${expectedFile}`) + } else { + console.log(` ${label}: ✗ Missing expected output file: 
${expectedFile}`) + } + expect(found).toBe(true) + } + return files + } catch (err) { + console.log(` ${label}: Error getting output files: ${err}`) + return [] + } + } + + // Helper to verify step status in UI + async function verifyStepStatusInUI( + page: Page, + stepId: string, + expectedStatus: string, + label: string, + ) { + await ensureStepsTabActive(page) + const step = page.locator(`.mp-step[data-step-id="${stepId}"]`) + const statusEl = step.locator('.mp-step-status') + const statusText = await statusEl.textContent().catch(() => '') + console.log(` ${label}: Step ${stepId} UI status: "${statusText}"`) + return statusText + } + + // Helper to verify progress bar percentage in UI + async function verifyProgressInUI(page: Page, label: string) { + const progressText = page.locator('.mp-progress-text') + const text = await progressText.textContent().catch(() => '') + console.log(` ${label}: Progress: ${text}`) + return text + } + + // Helper to verify Run button visibility for a step + async function verifyRunButtonVisibility( + page: Page, + stepId: string, + shouldBeVisible: boolean, + label: string, + ) { + await ensureStepsTabActive(page) + const step = page.locator(`.mp-step[data-step-id="${stepId}"]`) + if (!(await step.isVisible().catch(() => false))) { + console.log(` ${label}: Step ${stepId} not visible on this page`) + return null + } + await ensureStepExpanded(page, stepId) + const runBtn = step.locator('.mp-run-btn') + const isVisible = await runBtn.isVisible().catch(() => false) + console.log( + ` ${label}: Run button for ${stepId}: visible=${isVisible}, expected=${shouldBeVisible}`, + ) + expect(isVisible).toBe(shouldBeVisible) + return isVisible + } + + // Helper to verify Preview button visibility + async function verifyPreviewButtonVisibility( + page: Page, + stepId: string, + shouldBeVisible: boolean, + label: string, + ) { + await ensureStepsTabActive(page) + const step = page.locator(`.mp-step[data-step-id="${stepId}"]`) + if (!(await 
step.isVisible().catch(() => false))) { + return null + } + const previewBtn = step.locator('.mp-preview-btn').first() + let isVisible = await previewBtn.isVisible().catch(() => false) + if (shouldBeVisible && !isVisible) { + const deadline = Date.now() + UI_TIMEOUT + while (Date.now() < deadline) { + await ensureStepExpanded(page, stepId) + await page.waitForTimeout(250) + isVisible = await previewBtn.isVisible().catch(() => false) + if (isVisible) break + } + } + console.log( + ` ${label}: Preview button for ${stepId}: visible=${isVisible}, expected=${shouldBeVisible}`, + ) + expect(isVisible).toBe(shouldBeVisible) + return isVisible + } + + async function verifyShareButtonVisibility( + page: Page, + stepId: string, + shouldBeVisible: boolean, + label: string, + ) { + await ensureStepsTabActive(page) + const step = page.locator(`.mp-step[data-step-id="${stepId}"]`) + if (!(await step.isVisible().catch(() => false))) return null + await ensureStepExpanded(page, stepId) + const shareBtn = step.locator('.mp-share-btn').first() + const isVisible = await shareBtn.isVisible().catch(() => false) + console.log( + ` ${label}: Share button for ${stepId}: visible=${isVisible}, expected=${shouldBeVisible}`, + ) + expect(isVisible).toBe(shouldBeVisible) + return isVisible + } + + async function verifyContributionButtons( + page: Page, + stepId: string, + minimumCount: number, + label: string, + ) { + await ensureStepsTabActive(page) + const step = page.locator(`.mp-step[data-step-id="${stepId}"]`) + await ensureStepExpanded(page, stepId) + const finderButtons = step.locator('.mp-contrib-open-btn') + const count = await finderButtons.count() + console.log(` ${label}: Contribution finder buttons for ${stepId}: ${count}`) + expect(count).toBeGreaterThanOrEqual(minimumCount) + return count + } + + async function verifyRunNextButton( + page: Page, + shouldBeVisible: boolean, + expectedText: string | null, + label: string, + ) { + const runNextBtn = page.locator('.mp-run-next-btn') 
+ const isVisible = await runNextBtn.isVisible().catch(() => false) + console.log(` ${label}: Run Next visible=${isVisible}, expected=${shouldBeVisible}`) + expect(isVisible).toBe(shouldBeVisible) + if (shouldBeVisible && expectedText) { + const text = (await runNextBtn.textContent().catch(() => '')) || '' + console.log(` ${label}: Run Next text="${text}"`) + expect(text).toContain(expectedText) + } + } + + async function verifyActivityLogContains( + page: Page, + label: string, + expectedText: string, + timeoutMs = 20_000, + backend?: Backend, + sessionIdForBackend?: string, + ) { + const runCard = page.locator('.flow-run-card').first() + const start = Date.now() + while (Date.now() - start < timeoutMs) { + await runCard + .locator('.mp-tab[data-tab="logs"]') + .click() + .catch(() => {}) + await page.waitForTimeout(250) + const logsText = + (await runCard + .locator('.mp-logs-content') + .innerText() + .catch(() => '')) || '' + if (logsText.includes(expectedText)) { + console.log(` ${label}: Activity log contains "${expectedText}"`) + await runCard.locator('.mp-tab[data-tab="steps"]').click() + await runCard + .locator('.mp-tab-content[data-tab-content="steps"]') + .waitFor({ state: 'visible', timeout: UI_TIMEOUT }) + .catch(() => {}) + return + } + if (backend && sessionIdForBackend) { + await backend.invoke('trigger_syftbox_sync').catch(() => {}) + const backendLogs = await backend + .invoke('get_participant_logs', { sessionId: sessionIdForBackend }) + .catch(() => []) + if (Array.isArray(backendLogs)) { + const found = backendLogs.some((entry: any) => { + const participant = entry?.participant || entry?.role || 'participant' + const text = + entry?.event === 'joined' + ? `${participant} joined the flow` + : entry?.event === 'step_completed' + ? `${participant} completed step "${entry?.step_id}"` + : entry?.event === 'step_shared' + ? 
`${participant} shared outputs from "${entry?.step_id}"` + : `${participant}: ${entry?.event || ''}` + return text.includes(expectedText) + }) + if (found) { + console.log(` ${label}: Activity log matched via backend "${expectedText}"`) + await runCard.locator('.mp-tab[data-tab="steps"]').click() + await runCard + .locator('.mp-tab-content[data-tab-content="steps"]') + .waitFor({ state: 'visible', timeout: UI_TIMEOUT }) + .catch(() => {}) + return + } + } + } + await page.waitForTimeout(500) + } + await runCard.locator('.mp-tab[data-tab="steps"]').click() + await runCard + .locator('.mp-tab-content[data-tab-content="steps"]') + .waitFor({ state: 'visible', timeout: UI_TIMEOUT }) + .catch(() => {}) + throw new Error(`${label}: Activity log missing expected text: ${expectedText}`) + } + + async function sendMessageInSelectedThread(page: Page, label: string, body: string) { + let messageInput = page.locator('#message-compose-body:visible').first() + let inputVisible = await messageInput.isVisible().catch(() => false) + if (!inputVisible) { + console.log(` ${label}: Composer hidden, re-selecting thread...`) + for (let attempt = 0; attempt < 5; attempt += 1) { + await page.click('button:has-text("Messages")').catch(() => {}) + await page.waitForTimeout(200) + const threadItem = page.locator('.message-thread-item').first() + if (await threadItem.isVisible().catch(() => false)) { + await threadItem.click({ force: true }).catch(() => {}) + } + await page.waitForTimeout(350) + messageInput = page.locator('#message-compose-body:visible').first() + inputVisible = await messageInput.isVisible().catch(() => false) + if (inputVisible) break + } + if (!inputVisible) { + throw new Error(`${label}: message composer remained hidden`) + } + } + await messageInput.waitFor({ timeout: UI_TIMEOUT }) + await messageInput.fill(body) + await page.locator('#message-send-btn:visible').first().click() + console.log(` ${label}: Sent message: "${body.substring(0, 40)}..."`) + await 
page.waitForTimeout(1500) + } + + // Step 1: Contributors run "generate" + console.log('\nStep 1: Generate Numbers') + const ranGenerate1 = await runStepInUI(page1, 'generate', email1) + expect(ranGenerate1).toBe(true) + await page1.waitForTimeout(1500) // Wait for file to be written + await verifyStepOutputFiles(backend1, sessionId, 'generate', ['numbers.json'], email1) + + // Verify UI shows step as completed and review/share actions are available + await verifyStepStatusInUI(page1, 'generate', 'Completed', email1) + const progress1AfterGen = await verifyProgressInUI(page1, email1) + expect(/0\/3|1\/3/.test(progress1AfterGen || '')).toBe(true) + await verifyPreviewButtonVisibility(page1, 'generate', true, email1) + await verifyShareButtonVisibility(page1, 'generate', true, email1) + + const ranGenerate2 = await runStepInUI(page2, 'generate', email2) + expect(ranGenerate2).toBe(true) + await page2.waitForTimeout(1500) // Wait for file to be written + await verifyStepOutputFiles(backend2, sessionId, 'generate', ['numbers.json'], email2) + + // Verify UI shows step as completed and review/share actions are available + await verifyStepStatusInUI(page2, 'generate', 'Completed', email2) + await verifyProgressInUI(page2, email2) + await verifyPreviewButtonVisibility(page2, 'generate', true, email2) + await verifyShareButtonVisibility(page2, 'generate', true, email2) + + // Step 2: Contributors share from the same Generate step (run -> review -> share) + console.log('\nStep 2: Share Generate Outputs') + const sharedGenerate1 = await shareStepInUI(page1, 'generate', email1) + expect(sharedGenerate1).toBe(true) + + // Verify UI shows step as shared + await page1.waitForTimeout(500) + await verifyStepStatusInUI(page1, 'generate', 'Shared', email1) + const progress1AfterShare = await verifyProgressInUI(page1, email1) + expect(/0\/3|1\/3|2\/3/.test(progress1AfterShare || '')).toBe(true) + + const sharedGenerate2 = await shareStepInUI(page2, 'generate', email2) + 
expect(sharedGenerate2).toBe(true) + + // Verify UI shows step as shared + await page2.waitForTimeout(500) + await verifyStepStatusInUI(page2, 'generate', 'Shared', email2) + const progress2AfterShare = await verifyProgressInUI(page2, email2) + expect(/0\/3|1\/3|2\/3|Done/.test(progress2AfterShare || '')).toBe(true) + + // Aggregator should expose contributor finder links on generate step. + await backend3.invoke('trigger_syftbox_sync').catch(() => {}) + await verifyContributionButtons(page3, 'generate', 2, email3) + + // Simulate receiving shared inputs at aggregator + // In real flow, this would happen via messaging - for now we verify generate outputs exist + console.log('\nVerifying contributor outputs before aggregation...') + const files1 = await verifyStepOutputFiles( + backend1, + sessionId, + 'generate', + ['numbers.json'], + email1, + ) + const files2 = await verifyStepOutputFiles( + backend2, + sessionId, + 'generate', + ['numbers.json'], + email2, + ) + + // Step 3: Barrier + Aggregate + console.log('\nStep 3: Aggregate Sum') + // After contributors share, the barrier step 'contributions_ready' should complete + // which unblocks 'aggregate' (depends_on: [contributions_ready]) + + const aggState = await backend3.invoke('get_multiparty_flow_state', { sessionId }) + const barrierStep = aggState?.steps?.find((s: any) => s.id === 'contributions_ready') + const aggStep = aggState?.steps?.find((s: any) => s.id === 'aggregate') + console.log(` Barrier step status: ${barrierStep?.status}`) + console.log(` Aggregator aggregate step status: ${aggStep?.status}`) + + // Aggregate should now be Ready (barrier completed after contributors shared) + expect(aggStep?.status).toBe('Ready') + + // Run aggregate step + const aggregateRan = await runStepInUI(page3, 'aggregate', email3) + expect(aggregateRan).toBe(true) + await page3.waitForTimeout(1500) + + // Verify aggregate output files + await verifyStepOutputFiles(backend3, sessionId, 'aggregate', ['result.json'], 
email3) + + // Verify aggregate result contains contributor data + const aggResultFiles = await backend3.invoke('get_step_output_files', { + sessionId: sessionId, + stepId: 'aggregate', + }) + console.log(` Aggregator aggregate output: ${JSON.stringify(aggResultFiles)}`) + expect(aggResultFiles.length).toBeGreaterThan(0) + + // Read the result file to verify it contains contributor data + const resultPath = aggResultFiles.find((f: string) => f.endsWith('result.json')) + if (resultPath) { + const resultContent = fs.readFileSync(resultPath, 'utf-8') + const resultData = JSON.parse(resultContent) + console.log( + ` Aggregate result: contributions=${resultData.contributions?.length}, all_numbers=${resultData.all_numbers?.length}, total_sum=${resultData.total_sum}`, + ) + + // Verify we got data from both contributors + expect(resultData.contributions?.length).toBe(2) + expect(resultData.all_numbers?.length).toBeGreaterThan(0) + expect(resultData.count).toBeGreaterThan(0) + } + + // Verify UI shows aggregate as completed + await verifyStepStatusInUI(page3, 'aggregate', 'Completed', email3) + const progressAfterAgg = await verifyProgressInUI(page3, email3) + expect(/2\/3|Done/.test(progressAfterAgg || '')).toBe(true) + await verifyPreviewButtonVisibility(page3, 'aggregate', true, email3) + await verifyShareButtonVisibility(page3, 'aggregate', true, email3) + + // Share final results from the same Aggregate step + const sharedAggregate = await shareStepInUI(page3, 'aggregate', email3) + expect(sharedAggregate).toBe(true) + await page3.waitForTimeout(500) + await verifyStepStatusInUI(page3, 'aggregate', 'Shared', email3) + const progressAfterShare = await verifyProgressInUI(page3, email3) + expect(progressAfterShare).toContain('Done') + const aggSharedFiles = await backend3.invoke('get_step_output_files', { + sessionId, + stepId: 'aggregate', + }) + const aggPermFile = (aggSharedFiles || []).find((f: string) => f.endsWith('syft.pub.yaml')) + 
expect(aggPermFile).toBeTruthy() + const aggPermText = fs.readFileSync(String(aggPermFile), 'utf-8') + const readPrincipals = extractReadPrincipalsFromSyftPub(aggPermText) + expect(readPrincipals).toEqual(expect.arrayContaining([email1, email2, email3])) + // Aggregate output may be directly resolvable (no per-contribution finder button needed). + await verifyContributionButtons(page1, 'aggregate', 0, email1) + await verifyContributionButtons(page2, 'aggregate', 0, email2) + const waitingBannerClient1 = + (await runCard1 + .locator('.mp-waiting-banner') + .innerText() + .catch(() => '')) || '' + expect(waitingBannerClient1.toLowerCase()).not.toContain(email2.toLowerCase()) + + // Shared step outputs should be published into chat messages with metadata/files + const contributionSharedMsg = await waitForThreadMessageMatching( + backend3, + threadId, + (msg) => normalizeMetadata(msg?.metadata)?.flow_results?.step_id === 'generate', + 'contribution share results message', + ) + const contributionMeta = normalizeMetadata(contributionSharedMsg?.metadata)?.flow_results + expect(Array.isArray(contributionMeta?.files)).toBe(true) + expect(contributionMeta?.files?.length).toBeGreaterThan(0) + + const resultSharedMsg = await waitForThreadMessageMatching( + backend1, + threadId, + (msg) => normalizeMetadata(msg?.metadata)?.flow_results?.step_id === 'aggregate', + 'final result share message', + ) + const resultMeta = normalizeMetadata(resultSharedMsg?.metadata)?.flow_results + expect(Array.isArray(resultMeta?.files)).toBe(true) + expect(resultMeta?.files?.length).toBeGreaterThan(0) + const resultSharedMsgClient2 = await waitForThreadMessageMatching( + backend2, + threadId, + (msg) => normalizeMetadata(msg?.metadata)?.flow_results?.step_id === 'aggregate', + 'final result share message (client2)', + ) + const resultMetaClient2 = normalizeMetadata(resultSharedMsgClient2?.metadata)?.flow_results + expect(Array.isArray(resultMetaClient2?.files)).toBe(true) + 
expect(resultMetaClient2?.files?.length).toBeGreaterThan(0) + + log(logSocket, { event: 'flow-steps-completed' }) + + // === Send Messages to Chat and Verify === + console.log('\n--- Sending Messages Between Participants ---') + + // Navigate all clients to Messages and select the group thread + await navigateToMessagesAndFindThread(page1, email1) + await navigateToMessagesAndFindThread(page2, email2) + await navigateToMessagesAndFindThread(page3, email3) + + // Everyone sends a hello message in the group chat + const hello1 = `Hello from ${email1} at ${Date.now()}` + const hello2 = `Hello from ${email2} at ${Date.now()}` + const hello3 = `Hello from ${email3} at ${Date.now()}` + await sendMessageInSelectedThread(page1, email1, hello1) + await sendMessageInSelectedThread(page2, email2, hello2) + await sendMessageInSelectedThread(page3, email3, hello3) + + // Verify all participants can receive all hello messages in the thread + const participantsBackends = [ + { email: email1, backend: backend1 }, + { email: email2, backend: backend2 }, + { email: email3, backend: backend3 }, + ] + const helloMessages = [hello1, hello2, hello3] + for (const participant of participantsBackends) { + for (const hello of helloMessages) { + await waitForThreadMessageMatching( + participant.backend, + threadId, + (msg) => (msg?.body || '').includes(hello), + `${participant.email} sees hello message`, + ) + } + } + + // Navigate back to Runs to check final state + await page1.click('button:has-text("Runs")') + await page2.click('button:has-text("Runs")') + await page3.click('button:has-text("Runs")') + await page1.waitForTimeout(500) + await page2.waitForTimeout(500) + await page3.waitForTimeout(500) + + // === Verify Final State === + console.log('\n--- Final Verification ---') + + const finalState1 = await backend1.invoke('get_multiparty_flow_state', { sessionId }) + const finalState2 = await backend2.invoke('get_multiparty_flow_state', { sessionId }) + const finalState3 = await 
backend3.invoke('get_multiparty_flow_state', { sessionId }) + + console.log(`\n${email1} final step states:`) + finalState1?.steps?.forEach((s: any) => console.log(` - ${s.name}: ${s.status}`)) + + console.log(`\n${email2} final step states:`) + finalState2?.steps?.forEach((s: any) => console.log(` - ${s.name}: ${s.status}`)) + + console.log(`\n${email3} final step states:`) + finalState3?.steps?.forEach((s: any) => console.log(` - ${s.name}: ${s.status}`)) + + // Verify UI shows correct step status for each client + console.log('\n--- Verifying Final UI State ---') + const firstRunId1 = finalState1?.run_id + const firstRunId2 = finalState2?.run_id + const firstRunId3 = finalState3?.run_id + + // Client1 should show generate as shared + const client1GenerateStatus = finalState1?.steps?.find((s: any) => s.id === 'generate')?.status + console.log(` ${email1}: generate=${client1GenerateStatus}`) + expect(client1GenerateStatus).toBe('Shared') + + // Client2 should show generate as shared + const client2GenerateStatus = finalState2?.steps?.find((s: any) => s.id === 'generate')?.status + console.log(` ${email2}: generate=${client2GenerateStatus}`) + expect(client2GenerateStatus).toBe('Shared') + + // Aggregator should have shared aggregate + const aggAggregateStatus = finalState3?.steps?.find((s: any) => s.id === 'aggregate')?.status + console.log(` ${email3}: aggregate=${aggAggregateStatus}`) + expect(aggAggregateStatus).toBe('Shared') + + // Verify UI progress shows correct count + const finalProgress1 = await verifyProgressInUI(page1, email1) + const finalProgress2 = await verifyProgressInUI(page2, email2) + const finalProgress3 = await verifyProgressInUI(page3, email3) + + // All clients should see full flow completion. 
+ expect(finalProgress1).toContain('Done') + expect(finalProgress2).toContain('Done') + expect(finalProgress3).toContain('Done') + expect(await runCard1.evaluate((el) => el.classList.contains('mp-run-complete'))).toBe(true) + expect(await runCard2.evaluate((el) => el.classList.contains('mp-run-complete'))).toBe(true) + expect(await runCard3.evaluate((el) => el.classList.contains('mp-run-complete'))).toBe(true) + + // === Verify Results in Chat === + console.log('\n--- Checking for Shared Results in Chat ---') + + // Navigate back to Messages to see if results appeared + await page1.click('button:has-text("Messages")') + await page1.waitForTimeout(1000) + + // Sync and check for result messages + const syncBtn1 = page1.locator('#refresh-messages-btn') + if (await syncBtn1.isVisible()) { + await syncBtn1.click() + await page1.waitForTimeout(2000) + } + + // === Re-propose Same Flow in Same Group Thread (new session/new run) === + console.log('\n--- Re-proposing Same Flow in Same Group Thread ---') + const previousSessionId = sessionId + const secondTimestamp = Date.now() + + await navigateToMessagesAndFindThread(page3, email3) + await proposeBtn.waitFor({ timeout: UI_TIMEOUT }) + await proposeBtn.click() + await proposeModal.waitFor({ timeout: UI_TIMEOUT }) + await page3.selectOption('#propose-flow-select', { label: flowName }) + await page3.waitForTimeout(500) + + const secondRoleRows = page3.locator('#propose-flow-roles-list .propose-flow-role-row') + const secondRoleCount = await secondRoleRows.count() + expect(secondRoleCount).toBe(3) + + // Deliberately assign clients in reverse order from the first invitation. 
+ const secondAllCandidates = [email1, email2, email3] + const secondUsedEmails = new Set() + for (let i = 0; i < secondRoleCount; i += 1) { + const row = secondRoleRows.nth(i) + const roleLabel = ( + (await row.locator('.propose-flow-role-label').textContent().catch(() => '')) || '' + ) + .toLowerCase() + .trim() + const select = row.locator('select') + let preferred = '' + + if (roleLabel.includes('aggregator')) { + preferred = email3 + } else if (roleLabel.includes('1')) { + preferred = email2 + } else if (roleLabel.includes('2')) { + preferred = email1 + } + const selectedEmail = + (preferred && !secondUsedEmails.has(preferred) ? preferred : '') || + secondAllCandidates.find((candidate) => !secondUsedEmails.has(candidate)) || + preferred || + email2 + await select.selectOption(selectedEmail) + secondUsedEmails.add(selectedEmail) + } + + await page3 + .locator('#propose-flow-message') + .fill(`Second run in same thread (reordered clients) - ${secondTimestamp}`) + await expect + .poll(async () => { + try { + return await sendBtn.isEnabled() + } catch { + return false + } + }, { timeout: UI_TIMEOUT }) + .toBe(true) + await sendBtn.click() + try { + await expect(proposeModal).toBeHidden({ timeout: 6000 }) + } catch { + console.log(' Aggregator: second invite modal still open after click, using JS send fallback') + await page3.evaluate(() => window.proposeFlowModal?.sendInvitation?.()) + await expect(proposeModal).toBeHidden({ timeout: UI_TIMEOUT }) + } + await page3.waitForTimeout(1500) + + const secondInvitation = await waitForThreadMessageMatching( + backend1, + threadId, + (msg) => { + const invite = normalizeMetadata(msg?.metadata)?.flow_invitation + if (!invite || invite.flow_name !== flowName) return false + const sid = invite.session_id || invite.sessionId + return Boolean(sid && sid !== previousSessionId) + }, + 'second flow invitation in same group thread', + ) + + const secondInviteMeta = normalizeMetadata(secondInvitation?.metadata)?.flow_invitation + 
const secondSessionId = secondInviteMeta?.session_id || secondInviteMeta?.sessionId || '' + expect(secondSessionId).toBeTruthy() + expect(secondSessionId).not.toBe(previousSessionId) + + const secondParticipants = secondInviteMeta?.participants || [] + const secondFlowSpec = secondInviteMeta?.flow_spec + expect(Array.isArray(secondParticipants)).toBe(true) + expect(secondParticipants.length).toBe(3) + expect(secondFlowSpec).toBeTruthy() + + // Accept second invitation for all participants and verify it becomes a distinct run. + const acceptPayload = { + sessionId: secondSessionId, + flowName, + flowSpec: secondFlowSpec, + participants: secondParticipants, + autoRunAll: false, + threadId, + } + await backend1.invoke('accept_flow_invitation', acceptPayload) + await backend2.invoke('accept_flow_invitation', acceptPayload) + await backend3.invoke('accept_flow_invitation', acceptPayload) + + const secondState1 = await backend1.invoke('get_multiparty_flow_state', { + sessionId: secondSessionId, + }) + const secondState2 = await backend2.invoke('get_multiparty_flow_state', { + sessionId: secondSessionId, + }) + const secondState3 = await backend3.invoke('get_multiparty_flow_state', { + sessionId: secondSessionId, + }) + + expect(secondState1?.session_id).toBe(secondSessionId) + expect(secondState2?.session_id).toBe(secondSessionId) + expect(secondState3?.session_id).toBe(secondSessionId) + + expect(secondState1?.run_id).toBeTruthy() + expect(secondState2?.run_id).toBeTruthy() + expect(secondState3?.run_id).toBeTruthy() + + expect(secondState1?.run_id).not.toBe(firstRunId1) + expect(secondState2?.run_id).not.toBe(firstRunId2) + expect(secondState3?.run_id).not.toBe(firstRunId3) + + console.log('\n=== Multiparty Flow Test Complete! ===') + log(logSocket, { event: 'multiparty-flow-test-complete' }) + + // Interactive mode pause + if (process.env.INTERACTIVE_MODE === '1') { + console.log('\n--- Interactive Mode ---') + console.log('All three clients have completed the flow. 
You can interact with them:') + console.log(` Client 1: ${uiBaseUrl}?ws=${wsPort1}&real=1`) + console.log(` Client 2: ${uiBaseUrl}?ws=${wsPort2}&real=1`) + console.log(` Client 3: ${uiBaseUrl}?ws=${wsPort3}&real=1`) + console.log('\nInteractive pause disabled; continuing cleanup.') + } + + // Cleanup - close pages first to stop polling intervals and prevent WS errors + await page1.close() + await page2.close() + await page3.close() + + await backend1.close() + await backend2.close() + await backend3.close() + + if (logSocket) { + await new Promise((resolve) => { + logSocket.once('close', () => resolve()) + logSocket.close() + }) + } + }) +}) diff --git a/tests/ui/messages-multiparty.spec.ts b/tests/ui/messages-multiparty.spec.ts new file mode 100644 index 00000000..f28abcb3 --- /dev/null +++ b/tests/ui/messages-multiparty.spec.ts @@ -0,0 +1,677 @@ +/** + * Multiparty Messaging Test (Three Clients) + * Tests the multiparty/group messaging workflow: + * 1. Three clients (client1, client2, client3/aggregator) onboard + * 2. All three exchange keys (import each other as contacts) + * 3. Aggregator creates a group chat with all three participants + * 4. Verify all 3 participants appear on every client's thread + * 5. Verify from/to fields are correct (sender never sees own email in "to") + * 6. Client1 and client2 reply in the same thread + * 7. All participants see all messages in the shared thread + * 8. 
N-to-N: each pair sends a separate 1:1 message; verify no cross-contamination + * + * Usage: + * ./test-scenario.sh --pipelines-multiparty --interactive + * + * @tag pipelines-multiparty + */ +import { expect, test, type Page } from './playwright-fixtures' +import WebSocket from 'ws' +import { waitForAppReady, ensureProfileSelected } from './test-helpers.js' +import { setWsPort, completeOnboarding, ensureLogSocket, log } from './onboarding-helper.js' + +const TEST_TIMEOUT = 300_000 // 5 minutes max +const UI_TIMEOUT = 10_000 +const MESSAGE_TIMEOUT = 90_000 // 90 seconds for message delivery (3 clients = more traffic) +const SYNC_INTERVAL = 500 // ms between sync polls + +test.describe.configure({ timeout: TEST_TIMEOUT }) + +interface Backend { + invoke: (cmd: string, args?: Record<string, any>, timeoutMs?: number) => Promise<any> + close: () => Promise<void> +} + +async function connectBackend(port: number): Promise<Backend> { + const socket = new WebSocket(`ws://localhost:${port}`) + await new Promise<void>((resolve, reject) => { + const timeout = setTimeout( + () => reject(new Error(`WS connect timeout on port ${port}`)), + 10_000, + ) + socket.once('open', () => { + clearTimeout(timeout) + resolve() + }) + socket.once('error', (err) => { + clearTimeout(timeout) + reject(err) + }) + }) + + let nextId = 0 + const pending = new Map<number, { resolve: (v: any) => void; reject: (e: any) => void }>() + + socket.on('message', (data) => { + let parsed: any + try { + parsed = JSON.parse(data.toString()) + } catch { + return + } + const entry = pending.get(parsed?.id) + if (!entry) return + pending.delete(parsed.id) + if (parsed.error) entry.reject(new Error(parsed.error)) + else entry.resolve(parsed.result) + }) + + function invoke(cmd: string, args: Record<string, any> = {}, timeoutMs = 30_000) { + const id = ++nextId + socket.send(JSON.stringify({ id, cmd, args })) + return new Promise<any>((resolve, reject) => { + pending.set(id, { resolve, reject }) + setTimeout(() => { + if (!pending.has(id)) return + pending.delete(id) + reject(new Error(`WS invoke 
timeout: ${cmd}`)) + }, timeoutMs) + }) + } + + async function close() { + if (socket.readyState !== WebSocket.OPEN) return + await new Promise<void>((resolve) => { + socket.once('close', () => resolve()) + socket.close() + }) + } + + return { invoke, close } +} + +async function waitForMessage( + backend: Backend, + targetBody: string, + timeoutMs = MESSAGE_TIMEOUT, +): Promise<void> { + const start = Date.now() + while (Date.now() - start < timeoutMs) { + try { + await backend.invoke('sync_messages_with_failures') + } catch { + // Ignore transient sync errors + } + + const threads = await backend.invoke('list_message_threads', { scope: 'all' }) + if (Array.isArray(threads)) { + for (const thread of threads) { + const threadId = thread?.thread_id || thread?.threadId || thread?.id + if (!threadId) continue + const msgs = await backend.invoke('get_thread_messages', { threadId }) + if (Array.isArray(msgs) && msgs.some((m: any) => (m?.body || '').includes(targetBody))) { + return + } + } + } + await new Promise((r) => setTimeout(r, SYNC_INTERVAL)) + } + throw new Error(`Message not found within timeout: ${targetBody}`) +} + +async function findGroupThread( + backend: Backend, + expectedThreadId: string, + timeoutMs = MESSAGE_TIMEOUT, +): Promise<string | null> { + const start = Date.now() + while (Date.now() - start < timeoutMs) { + try { + await backend.invoke('sync_messages_with_failures') + } catch { + // Ignore transient sync errors + } + + const threads = await backend.invoke('list_message_threads', { scope: 'all' }) + if (Array.isArray(threads)) { + for (const thread of threads) { + const threadId = thread?.thread_id || '' + if (threadId === expectedThreadId) { + return threadId + } + } + } + await new Promise((r) => setTimeout(r, SYNC_INTERVAL)) + } + return null +} + +async function getThreadSummary(backend: Backend, targetThreadId: string): Promise<any> { + const threads = await backend.invoke('list_message_threads', { scope: 'all' }) + if (Array.isArray(threads)) { + return 
threads.find((t: any) => t.thread_id === targetThreadId) || null + } + return null +} + +async function getThreadMessages(backend: Backend, threadId: string): Promise<any[]> { + const msgs = await backend.invoke('get_thread_messages', { threadId }) + return Array.isArray(msgs) ? msgs : [] +} + +async function countMessagesInThread(backend: Backend, threadId: string): Promise<number> { + const msgs = await getThreadMessages(backend, threadId) + return msgs.length +} + +async function getFirstMessageInThread(backend: Backend, threadId: string): Promise<string | null> { + const msgs = await getThreadMessages(backend, threadId) + if (msgs.length > 0) { + const sorted = msgs.sort((a: any, b: any) => { + const aTime = new Date(a.created_at).getTime() + const bTime = new Date(b.created_at).getTime() + return aTime - bTime + }) + return sorted[0]?.id || null + } + return null +} + +async function syncAll(...backends: Backend[]) { + for (const b of backends) { + try { + await b.invoke('sync_messages_with_failures') + } catch { + // Ignore transient sync errors + } + } +} + +function assertParticipants(actual: string[], expected: string[], context: string) { + const sortedActual = [...actual].sort() + const sortedExpected = [...expected].sort() + const missing = sortedExpected.filter((e) => !sortedActual.includes(e)) + const extra = sortedActual.filter((a) => !sortedExpected.includes(a)) + if (missing.length > 0 || extra.length > 0) { + throw new Error( + `${context}: participant mismatch\n` + + ` expected: [${sortedExpected.join(', ')}]\n` + + ` actual: [${sortedActual.join(', ')}]\n` + + (missing.length > 0 ? ` missing: [${missing.join(', ')}]\n` : '') + + (extra.length > 0 ? 
` extra: [${extra.join(', ')}]\n` : ''), + ) + } +} + +test.describe('Multiparty messaging between three clients @pipelines-multiparty', () => { + test('three clients create and participate in a group chat', async ({ browser }) => { + const wsPortBase = Number.parseInt(process.env.DEV_WS_BRIDGE_PORT_BASE || '3333', 10) + const wsPort1 = wsPortBase + const wsPort2 = wsPortBase + 1 + const wsPort3 = wsPortBase + 2 + + const email1 = process.env.CLIENT1_EMAIL || 'client1@sandbox.local' + const email2 = process.env.CLIENT2_EMAIL || 'client2@sandbox.local' + const email3 = process.env.AGG_EMAIL || 'aggregator@sandbox.local' + const allEmails = [email1, email2, email3].sort() + + const logSocket = await ensureLogSocket() + log(logSocket, { + event: 'multiparty-start', + email1, + email2, + email3, + wsPort1, + wsPort2, + wsPort3, + }) + + console.log('=== Multiparty Group Chat Test ===') + console.log(`Client 1: ${email1} (WS port ${wsPort1})`) + console.log(`Client 2: ${email2} (WS port ${wsPort2})`) + console.log(`Client 3 (Aggregator): ${email3} (WS port ${wsPort3})`) + + // Create pages for all three clients + const page1 = await browser.newPage() + const page2 = await browser.newPage() + const page3 = await browser.newPage() + + await setWsPort(page1, wsPort1) + await setWsPort(page2, wsPort2) + await setWsPort(page3, wsPort3) + + // Connect backends + console.log('\n--- Connecting backends ---') + const backend1 = await connectBackend(wsPort1) + const backend2 = await connectBackend(wsPort2) + const backend3 = await connectBackend(wsPort3) + console.log('All backends connected') + + // Navigate to UI + const uiBaseUrl = process.env.UI_BASE_URL || 'http://localhost:8082' + await page1.goto(uiBaseUrl) + await page2.goto(uiBaseUrl) + await page3.goto(uiBaseUrl) + + // Complete onboarding for all three + console.log('\n--- Onboarding ---') + await completeOnboarding(page1, email1, logSocket) + await completeOnboarding(page2, email2, logSocket) + await 
completeOnboarding(page3, email3, logSocket) + console.log('All clients onboarded') + + // Ensure dev mode is available + await backend1.invoke('get_dev_mode_info') + await backend2.invoke('get_dev_mode_info') + await backend3.invoke('get_dev_mode_info') + + // === Key Exchange Phase === + console.log('\n--- Key Exchange Phase ---') + console.log('Each client importing the other two as contacts...') + + await backend1.invoke('network_import_contact', { identity: email2 }) + console.log(` ${email1} imported ${email2}`) + await backend1.invoke('network_import_contact', { identity: email3 }) + console.log(` ${email1} imported ${email3}`) + + await backend2.invoke('network_import_contact', { identity: email1 }) + console.log(` ${email2} imported ${email1}`) + await backend2.invoke('network_import_contact', { identity: email3 }) + console.log(` ${email2} imported ${email3}`) + + await backend3.invoke('network_import_contact', { identity: email1 }) + console.log(` ${email3} imported ${email1}`) + await backend3.invoke('network_import_contact', { identity: email2 }) + console.log(` ${email3} imported ${email2}`) + + log(logSocket, { event: 'key-exchange-complete' }) + console.log('All key exchanges complete!') + + // === Create Group Chat === + console.log('\n--- Creating Group Chat ---') + const timestamp = Date.now() + const groupSubject = `Group Chat Test - ${timestamp}` + const initialMessage = `Hello everyone! This is a group chat started by the aggregator. - ${timestamp}` + + console.log(`${email3} creating group chat with ${email1} and ${email2}...`) + + const groupMessage = await backend3.invoke('send_message', { + request: { + recipients: [email1, email2], + body: initialMessage, + subject: groupSubject, + }, + }) + + console.log(`Group chat created! 
Thread ID: ${groupMessage.thread_id}`) + log(logSocket, { event: 'group-chat-created', threadId: groupMessage.thread_id }) + + // === Verify the sent message has correct from/to on sender === + console.log('\n--- Verifying sender message fields ---') + expect(groupMessage.from).toBe(email3) + expect(groupMessage.to).not.toBe(email3) + console.log( + ` ✓ Sender (${email3}): from=${groupMessage.from}, to=${groupMessage.to} (not self)`, + ) + + // === Verify All Participants Receive the Group Message === + console.log('\n--- Verifying Group Message Delivery ---') + + console.log(`Checking ${email1} received group message...`) + await waitForMessage(backend1, initialMessage) + console.log(` ✓ ${email1} received the group message`) + + console.log(`Checking ${email2} received group message...`) + await waitForMessage(backend2, initialMessage) + console.log(` ✓ ${email2} received the group message`) + + log(logSocket, { event: 'group-message-delivered-to-all' }) + + // === Find the Group Thread on all 3 clients === + console.log('\n--- Finding Group Thread on all clients ---') + const expectedThreadId = groupMessage.thread_id + console.log(`Looking for thread ID: ${expectedThreadId}`) + + const groupThreadId1 = await findGroupThread(backend1, expectedThreadId) + const groupThreadId2 = await findGroupThread(backend2, expectedThreadId) + const groupThreadId3 = await findGroupThread(backend3, expectedThreadId) + + if (!groupThreadId1 || !groupThreadId2 || !groupThreadId3) { + throw new Error( + `Group thread ${expectedThreadId} not found on all clients: ` + + `c1=${groupThreadId1}, c2=${groupThreadId2}, c3=${groupThreadId3}`, + ) + } + console.log(` ✓ Thread found on all 3 clients`) + + // === Verify thread participants on every client show all 3 emails === + console.log('\n--- Verifying thread participants (all 3 must appear) ---') + + const summary1 = await getThreadSummary(backend1, expectedThreadId) + const summary2 = await getThreadSummary(backend2, expectedThreadId) + 
const summary3 = await getThreadSummary(backend3, expectedThreadId) + + assertParticipants(summary1.participants, allEmails, `Client1 (${email1}) thread participants`) + console.log(` ✓ ${email1} sees participants: [${summary1.participants.sort().join(', ')}]`) + + assertParticipants(summary2.participants, allEmails, `Client2 (${email2}) thread participants`) + console.log(` ✓ ${email2} sees participants: [${summary2.participants.sort().join(', ')}]`) + + assertParticipants(summary3.participants, allEmails, `Client3 (${email3}) thread participants`) + console.log(` ✓ ${email3} sees participants: [${summary3.participants.sort().join(', ')}]`) + + // === Verify from/to fields on received messages === + console.log('\n--- Verifying from/to fields on received messages ---') + + const msgsOnClient1 = await getThreadMessages(backend1, expectedThreadId) + const msgsOnClient2 = await getThreadMessages(backend2, expectedThreadId) + const msgsOnClient3 = await getThreadMessages(backend3, expectedThreadId) + + for (const msg of msgsOnClient1) { + expect(msg.from).toBe(email3) + expect(msg.to).not.toBe(msg.from) + console.log(` ${email1} sees msg: from=${msg.from} to=${msg.to}`) + } + + for (const msg of msgsOnClient2) { + expect(msg.from).toBe(email3) + expect(msg.to).not.toBe(msg.from) + console.log(` ${email2} sees msg: from=${msg.from} to=${msg.to}`) + } + + for (const msg of msgsOnClient3) { + expect(msg.from).toBe(email3) + expect(msg.to).not.toBe(msg.from) + console.log(` ${email3} sees msg: from=${msg.from} to=${msg.to}`) + } + console.log(` ✓ All from/to fields correct (no sender seeing own email in "to")`) + + // === Replies in the Group Thread === + console.log('\n--- Sending Replies in Group Thread ---') + + const msgId1 = await getFirstMessageInThread(backend1, expectedThreadId) + const msgId2 = await getFirstMessageInThread(backend2, expectedThreadId) + + if (!msgId1 || !msgId2) { + throw new Error('Could not find original message in thread for replies') + } + + 
const reply1 = `Reply from Client1 in the group! - ${timestamp}` + console.log(`${email1} sending reply...`) + const reply1Result = await backend1.invoke('send_message', { + request: { + body: reply1, + reply_to: msgId1, + }, + }) + console.log(` ✓ ${email1} sent reply`) + + // Verify reply from/to on sender side + expect(reply1Result.from).toBe(email1) + expect(reply1Result.to).not.toBe(email1) + console.log( + ` ✓ Reply from ${email1}: from=${reply1Result.from} to=${reply1Result.to} (not self)`, + ) + + const reply2 = `Reply from Client2 in the group! - ${timestamp}` + console.log(`${email2} sending reply...`) + const reply2Result = await backend2.invoke('send_message', { + request: { + body: reply2, + reply_to: msgId2, + }, + }) + console.log(` ✓ ${email2} sent reply`) + + expect(reply2Result.from).toBe(email2) + expect(reply2Result.to).not.toBe(email2) + console.log( + ` ✓ Reply from ${email2}: from=${reply2Result.from} to=${reply2Result.to} (not self)`, + ) + + log(logSocket, { event: 'group-replies-sent' }) + + // === Verify All Participants See All Messages === + console.log('\n--- Verifying All Replies Delivered ---') + + console.log(`Checking ${email3} received all replies...`) + await waitForMessage(backend3, reply1) + console.log(` ✓ ${email3} received reply from ${email1}`) + await waitForMessage(backend3, reply2) + console.log(` ✓ ${email3} received reply from ${email2}`) + + console.log(`Checking ${email1} received reply from ${email2}...`) + await waitForMessage(backend1, reply2) + console.log(` ✓ ${email1} received reply from ${email2}`) + + console.log(`Checking ${email2} received reply from ${email1}...`) + await waitForMessage(backend2, reply1) + console.log(` ✓ ${email2} received reply from ${email1}`) + + log(logSocket, { event: 'all-replies-delivered' }) + + // === Final sync and full thread verification === + console.log('\n--- Final Thread Verification ---') + const threadId = groupMessage.thread_id + + await new Promise((r) => setTimeout(r, 
2000)) + await syncAll(backend1, backend2, backend3) + + const count1 = await countMessagesInThread(backend1, threadId) + const count2 = await countMessagesInThread(backend2, threadId) + const count3 = await countMessagesInThread(backend3, threadId) + + console.log(`Thread message counts:`) + console.log(` ${email1}: ${count1} messages`) + console.log(` ${email2}: ${count2} messages`) + console.log(` ${email3}: ${count3} messages`) + + // Each client should have at least 3 messages (initial + 2 replies) + expect(count1).toBeGreaterThanOrEqual(3) + expect(count2).toBeGreaterThanOrEqual(3) + expect(count3).toBeGreaterThanOrEqual(3) + console.log(` ✓ All clients have >= 3 messages in group thread`) + + // === Verify from/to on all messages after replies === + console.log('\n--- Verifying from/to on all thread messages (post-replies) ---') + + const allBackends = [ + { backend: backend1, email: email1 }, + { backend: backend2, email: email2 }, + { backend: backend3, email: email3 }, + ] + + for (const { backend, email } of allBackends) { + const msgs = await getThreadMessages(backend, threadId) + for (const msg of msgs) { + // "from" must never be empty + expect(msg.from).toBeTruthy() + // Sender should never see their own email in "to" + if (msg.from === email) { + expect(msg.to).not.toBe(email) + } + // "to" must never be empty + expect(msg.to).toBeTruthy() + } + console.log( + ` ✓ ${email}: all ${msgs.length} messages have valid from/to (no self-addressed)`, + ) + } + + // === Verify participants still show all 3 after replies === + console.log('\n--- Verifying participants after replies ---') + for (const { backend, email } of allBackends) { + const summary = await getThreadSummary(backend, threadId) + assertParticipants( + summary.participants, + allEmails, + `${email} thread participants (post-replies)`, + ) + } + console.log(` ✓ All clients still show all 3 participants after replies`) + + // === Verify group_chat metadata on messages === + console.log('\n--- 
Verifying group_chat metadata ---') + for (const { backend, email } of allBackends) { + const msgs = await getThreadMessages(backend, threadId) + for (const msg of msgs) { + if (msg.metadata?.group_chat) { + const gc = msg.metadata.group_chat + expect(gc.is_group).toBe(true) + expect(gc.participants).toBeDefined() + const metaParticipants = [...gc.participants].sort() + expect(metaParticipants).toEqual(allEmails) + } + } + console.log(` ✓ ${email}: group_chat metadata correct on all messages`) + } + + log(logSocket, { event: 'group-verification-complete' }) + + // ===================================================================== + // === N-to-N: 1:1 messages between every pair, verify thread isolation + // ===================================================================== + console.log('\n--- N-to-N: Sending 1:1 messages between every pair ---') + + const pairs = [ + { from: backend1, fromEmail: email1, to: backend2, toEmail: email2 }, + { from: backend2, fromEmail: email2, to: backend1, toEmail: email1 }, + { from: backend1, fromEmail: email1, to: backend3, toEmail: email3 }, + { from: backend3, fromEmail: email3, to: backend1, toEmail: email1 }, + { from: backend2, fromEmail: email2, to: backend3, toEmail: email3 }, + { from: backend3, fromEmail: email3, to: backend2, toEmail: email2 }, + ] + + const pairMessages: Array<{ + fromEmail: string + toEmail: string + body: string + threadId: string + }> = [] + + for (const { from, fromEmail, to, toEmail } of pairs) { + const body = `DM from ${fromEmail} to ${toEmail} - ${timestamp}-${Math.random().toString(36).slice(2, 8)}` + const subject = `DM ${fromEmail} -> ${toEmail}` + + console.log(` Sending: ${fromEmail} -> ${toEmail}`) + const result = await from.invoke('send_message', { + request: { + recipients: [toEmail], + body, + subject, + }, + }) + + // Verify sender fields + expect(result.from).toBe(fromEmail) + expect(result.to).toBe(toEmail) + + pairMessages.push({ + fromEmail, + toEmail, + body, + threadId: 
result.thread_id, + }) + } + + console.log(` ✓ All 6 DMs sent`) + + // Wait for all DMs to be delivered + console.log('\n--- Verifying DM delivery ---') + for (const pm of pairMessages) { + const recipientBackend = + pm.toEmail === email1 ? backend1 : pm.toEmail === email2 ? backend2 : backend3 + await waitForMessage(recipientBackend, pm.body) + console.log(` ✓ ${pm.toEmail} received DM from ${pm.fromEmail}`) + } + + // Final sync + await new Promise((r) => setTimeout(r, 2000)) + await syncAll(backend1, backend2, backend3) + + // === Verify thread isolation: DMs don't leak into other threads === + console.log('\n--- Verifying thread isolation ---') + + for (const { backend, email } of allBackends) { + const threads = await backend.invoke('list_message_threads', { scope: 'all' }) + + for (const thread of threads) { + const tid = thread.thread_id + const msgs = await getThreadMessages(backend, tid) + const participants = new Set() + for (const m of msgs) { + participants.add(m.from) + participants.add(m.to) + } + + // If this is a 1:1 thread (not the group), it should have exactly 2 participants + if (tid !== threadId) { + const participantList = [...participants] + // Verify no 3rd-party messages leaked into a 1:1 thread + for (const m of msgs) { + const expectedSenders = participantList + if (!expectedSenders.includes(m.from)) { + throw new Error( + `Thread isolation violation on ${email}: ` + + `message from ${m.from} found in thread between [${participantList.join(', ')}]`, + ) + } + } + } + } + console.log(` ✓ ${email}: no cross-thread contamination detected`) + } + + // === Verify DM from/to correctness === + console.log('\n--- Verifying DM from/to correctness ---') + for (const pm of pairMessages) { + const senderBackend = + pm.fromEmail === email1 ? backend1 : pm.fromEmail === email2 ? backend2 : backend3 + const recipientBackend = + pm.toEmail === email1 ? backend1 : pm.toEmail === email2 ? 
backend2 : backend3 + + // Check sender's view + const senderMsgs = await getThreadMessages(senderBackend, pm.threadId) + const senderMsg = senderMsgs.find((m: any) => m.body === pm.body) + if (senderMsg) { + expect(senderMsg.from).toBe(pm.fromEmail) + expect(senderMsg.to).toBe(pm.toEmail) + } + + // Check recipient's view + const recipientMsgs = await getThreadMessages(recipientBackend, pm.threadId) + const recipientMsg = recipientMsgs.find((m: any) => m.body === pm.body) + if (recipientMsg) { + expect(recipientMsg.from).toBe(pm.fromEmail) + // Recipient should see the message addressed to themselves + expect(recipientMsg.to).toBe(pm.toEmail) + } + } + console.log(` ✓ All 6 DMs have correct from/to on both sender and recipient side`) + + console.log('\n=== Group Chat + N-to-N Test Complete! ===') + log(logSocket, { event: 'multiparty-test-complete' }) + + // Interactive mode pause + if (process.env.INTERACTIVE_MODE === '1') { + console.log('\n--- Interactive Mode ---') + console.log('All three clients are ready with group chat. 
You can interact with them:') + console.log(` Client 1: ${uiBaseUrl}?ws=${wsPort1}&real=1`) + console.log(` Client 2: ${uiBaseUrl}?ws=${wsPort2}&real=1`) + console.log(` Client 3: ${uiBaseUrl}?ws=${wsPort3}&real=1`) + console.log('\nInteractive pause disabled; continuing cleanup.') + } + + // Cleanup + await backend1.close() + await backend2.close() + await backend3.close() + + if (logSocket) { + await new Promise((resolve) => { + logSocket.once('close', () => resolve()) + logSocket.close() + }) + } + }) +}) diff --git a/tests/ui/onboarding-helper.ts b/tests/ui/onboarding-helper.ts index e7b70d23..62bc9919 100644 --- a/tests/ui/onboarding-helper.ts +++ b/tests/ui/onboarding-helper.ts @@ -9,10 +9,23 @@ import { ensureProfileSelected, waitForAppReady } from './test-helpers.js' export async function ensureLogSocket(): Promise { if (!process.env.UNIFIED_LOG_WS) return null const socket = new WebSocket(process.env.UNIFIED_LOG_WS) - await new Promise((resolve, reject) => { - socket.once('open', resolve) - socket.once('error', reject) + await new Promise((resolve) => { + let settled = false + const done = () => { + if (settled) return + settled = true + resolve() + } + socket.once('open', done) + socket.once('error', done) + setTimeout(done, 1500) }) + if (socket.readyState !== WebSocket.OPEN) { + try { + socket.terminate() + } catch {} + return null + } return socket } @@ -54,197 +67,286 @@ export async function completeOnboarding( ): Promise { log(logSocket, { event: 'onboarding-start', email }) - // Set up a persistent dialog handler that accepts all dialogs during onboarding. - // This is more robust than page.once() which can miss dialogs due to timing. 
- const dialogHandler = (dialog: import('@playwright/test').Dialog) => { - console.log( - `[onboarding] Accepting dialog: ${dialog.type()} - ${dialog.message().slice(0, 50)}`, - ) - dialog.accept().catch(() => {}) - } - page.on('dialog', dialogHandler) + const isVisible = async (selector: string, timeout = 1000): Promise => + page + .locator(selector) + .isVisible({ timeout }) + .catch(() => false) + const waitVisible = async (selector: string, timeout = 1000): Promise => + page + .locator(selector) + .waitFor({ state: 'visible', timeout }) + .then(() => true) + .catch(() => false) - try { - await waitForAppReady(page, { timeout: 30_000 }) - if (await ensureProfileSelected(page, { timeout: 30_000 })) { + for (let pass = 1; pass <= 2; pass += 1) { + try { await waitForAppReady(page, { timeout: 30_000 }) - } - - // The initial HTML ships with the main app layout visible (Run tab active) before the onboarding - // check completes. Only treat onboarding as "already complete" once the onboarding view is not - // active/visible. - const onboardingView = page.locator('#onboarding-view') - const onboardingActive = await onboardingView.isVisible({ timeout: 1000 }).catch(() => false) - if (!onboardingActive) { - log(logSocket, { event: 'onboarding-already-complete', email }) - console.log(`${email}: Already onboarded, skipping`) - return false // Already onboarded - } + if (await ensureProfileSelected(page, { timeout: 30_000 })) { + await waitForAppReady(page, { timeout: 30_000 }) + } - console.log(`${email}: Starting onboarding...`) + // The initial HTML ships with the main app layout visible (Run tab active) before the onboarding + // check completes. Only treat onboarding as "already complete" once the onboarding view is not active. 
+ const onboardingView = page.locator('#onboarding-view') + const onboardingActive = await onboardingView.isVisible({ timeout: 1000 }).catch(() => false) + if (!onboardingActive) { + log(logSocket, { event: 'onboarding-already-complete', email }) + console.log(`${email}: Already onboarded, skipping`) + return false + } - // Step 1: Welcome - await expect(page.locator('#onboarding-step-1')).toBeVisible({ timeout: 5000 }) - await page.locator('#onboarding-next-1').click() - // Wait for step 1 to be hidden before checking step 2 - await expect(page.locator('#onboarding-step-1')).toBeHidden({ timeout: 5000 }) + console.log(`${email}: Starting onboarding... (pass ${pass}/2)`) - // Step 2: Dependencies - skip - await expect(page.locator('#onboarding-step-2')).toBeVisible({ timeout: 5000 }) - console.log(`${email}: [onboarding] Clicking skip-dependencies-btn...`) - const step2StartTime = Date.now() - await page.locator('#skip-dependencies-btn').click() - // Wait for step 2 to be hidden before checking step 3 - // Increased timeout: dialog acceptance + invoke('update_saved_dependency_states') can take time in CI - await expect(page.locator('#onboarding-step-2')).toBeHidden({ timeout: 30000 }) - console.log(`${email}: [onboarding] Step 2 hidden after ${Date.now() - step2StartTime}ms`) + // Step 1: Welcome + if (await waitVisible('#onboarding-step-1', 8000)) { + await page.locator('#onboarding-next-1').click() + await expect(page.locator('#onboarding-step-1')).toBeHidden({ timeout: 8000 }) + } - // Step 3: Choose BioVault Home - await expect(page.locator('#onboarding-step-3')).toBeVisible({ timeout: 5000 }) - const homeInput = page.locator('#onboarding-home') - await expect(homeInput).toBeVisible({ timeout: 10_000 }) - if (!(await homeInput.inputValue()).trim()) { - // Home defaults are populated asynchronously; try to pull them directly if still empty. 
- const fallbackHome = await page.evaluate(async () => { + // Step 2: Dependencies - skip + if (await waitVisible('#onboarding-step-2', 5000)) { + console.log(`${email}: [onboarding] Clicking skip-dependencies-btn...`) + const step2StartTime = Date.now() + const skipBtn = page.locator('#skip-dependencies-btn') + const dialogHandler = async (dialog: any) => { + console.log( + `[onboarding] Accepting dialog: ${dialog.type()} - ${dialog.message().slice(0, 50)}`, + ) + await dialog.accept().catch(() => {}) + } + page.on('dialog', dialogHandler) try { - const invoke = (window as any)?.__TAURI__?.invoke - if (!invoke) return '' - const value = await invoke('profiles_get_default_home') - return typeof value === 'string' ? value : '' - } catch (_err) { - return '' + for (let attempt = 1; attempt <= 3; attempt++) { + const clickErr = await skipBtn + .click({ force: true, timeout: 8_000 }) + .then(() => null) + .catch((err) => err) + if (clickErr) { + console.log( + `${email}: [onboarding] Step 2 click attempt ${attempt} failed, trying JS fallback`, + ) + await page + .evaluate(() => { + const btn = document.querySelector( + '#skip-dependencies-btn', + ) as HTMLButtonElement | null + btn?.click() + }) + .catch(() => {}) + } + + const step2Hidden = await page + .locator('#onboarding-step-2') + .isHidden({ timeout: 12_000 }) + .catch(() => false) + const step4Visible = await isVisible('#onboarding-step-4', 500) + const runViewVisible = await isVisible('#run-view', 500) + if (step2Hidden || step4Visible || runViewVisible) { + console.log( + `${email}: [onboarding] Step 2 complete after ${Date.now() - step2StartTime}ms (attempt ${attempt})`, + ) + break + } + console.log( + `${email}: [onboarding] Step 2 still visible after attempt ${attempt}, retrying...`, + ) + if (attempt === 3) { + const step2Html = await page + .locator('#onboarding-step-2') + .innerHTML() + .catch(() => '') + const onboardingState = await page + .evaluate(() => ({ + checkComplete: (window as 
any).__ONBOARDING_CHECK_COMPLETE__ ?? null, + navReady: (window as any).__NAV_HANDLERS_READY__ ?? null, + eventReady: (window as any).__EVENT_HANDLERS_READY__ ?? null, + })) + .catch(() => null) + throw new Error( + `${email}: onboarding step 2 did not advance after 3 attempts (${Date.now() - step2StartTime}ms). ` + + `state=${JSON.stringify(onboardingState)} step2HtmlPrefix=${step2Html.slice(0, 240)}`, + ) + } + } + } finally { + page.off('dialog', dialogHandler) } - }) - if (fallbackHome) { - await homeInput.fill(fallbackHome) } - } - await expect(homeInput).toHaveValue(/.+/, { timeout: 20_000 }) - await page.locator('#onboarding-next-3').click() - // Wait for step 3-email to be visible (home check can take time in CI). - await expect(page.locator('#onboarding-step-3-email')).toBeVisible({ timeout: 20_000 }) - - // Step 3a: Email - await page.fill('#onboarding-email', email) - await expect(page.locator('#onboarding-next-3-email')).toBeEnabled() - await page.locator('#onboarding-next-3-email').click() - // Step 3-key: Key setup - await expect(page.locator('#onboarding-step-3-key')).toBeVisible({ timeout: 5000 }) - await expect(page.locator('#onboarding-next-3-key')).toBeEnabled({ timeout: 30_000 }) - // If the app generated a recovery code, the UI requires an explicit acknowledgement before proceeding. - const recoveryBlock = page.locator('#onboarding-recovery-block') - if (await recoveryBlock.isVisible().catch(() => false)) { - await page.locator('#onboarding-recovery-ack').check() - } - await page.locator('#onboarding-next-3-key').click() - // Wait for step 3-key to be hidden before checking step 4 - await expect(page.locator('#onboarding-step-3-key')).toBeHidden({ timeout: 5000 }) + // Wait for the next onboarding stage to become visible before probing specific sub-steps. 
+ await page + .waitForFunction( + () => { + const visible = (id: string) => { + const el = document.querySelector(id) as HTMLElement | null + if (!el) return false + return ( + el.offsetParent !== null && + window.getComputedStyle(el).display !== 'none' && + window.getComputedStyle(el).visibility !== 'hidden' + ) + } + return ( + visible('#onboarding-step-3') || + visible('#onboarding-step-3-email') || + visible('#onboarding-step-3-key') || + visible('#onboarding-step-4') || + visible('#run-view') + ) + }, + { timeout: 20_000 }, + ) + .catch(() => {}) - // Step 4: SyftBox - skip - await expect(page.locator('#onboarding-step-4')).toBeVisible({ timeout: 30_000 }) - console.log(`${email}: [onboarding] Step 4 visible, clicking skip-syftbox-btn...`) - const step4StartTime = Date.now() - await Promise.all([ - page.waitForNavigation({ waitUntil: 'networkidle' }).catch(() => {}), - page.locator('#skip-syftbox-btn').click(), - ]) - console.log( - `${email}: [onboarding] Navigation complete after ${Date.now() - step4StartTime}ms, waiting for app ready...`, - ) + // Step 3: Choose BioVault Home + if (await waitVisible('#onboarding-step-3', 5000)) { + const homeInput = page.locator('#onboarding-home') + await expect(homeInput).toBeVisible({ timeout: 10_000 }) + if (!(await homeInput.inputValue()).trim()) { + // Home defaults are populated asynchronously; try to pull them directly if still empty. + const fallbackHome = await page.evaluate(async () => { + try { + const invoke = (window as any)?.__TAURI__?.invoke + if (!invoke) return '' + const value = await invoke('profiles_get_default_home') + return typeof value === 'string' ? value : '' + } catch (_err) { + return '' + } + }) + if (fallbackHome) { + await homeInput.fill(fallbackHome) + } + } + await expect(homeInput).toHaveValue(/.+/, { timeout: 20_000 }) + await page.locator('#onboarding-next-3').click() + } - // On a fresh install, completing onboarding triggers a full page reload. 
- // Wait for the onboarding check to complete (not just for onboarding to be visible). - // After successful complete_onboarding, check_is_onboarded should return true and - // the app should show run-view instead of onboarding-view. - console.log(`${email}: [onboarding] Waiting for onboarding check to complete...`) - await page.waitForFunction( - () => - (window as any).__ONBOARDING_CHECK_COMPLETE__ === true && - (window as any).__NAV_HANDLERS_READY__ === true && - (window as any).__EVENT_HANDLERS_READY__ === true, - { timeout: 30_000 }, - ) - console.log( - `${email}: [onboarding] Onboarding check complete after ${Date.now() - step4StartTime}ms`, - ) + // Step 3a: Email + if (await waitVisible('#onboarding-step-3-email', 20_000)) { + await page.fill('#onboarding-email', email) + await expect(page.locator('#onboarding-next-3-email')).toBeEnabled() + await page.locator('#onboarding-next-3-email').click() + } - // Now check if the app transitioned to the main view or stayed on onboarding - const onboardingStillVisible = await page - .locator('#onboarding-view') - .evaluate((el) => { - return el.classList.contains('active') && window.getComputedStyle(el).display !== 'none' - }) - .catch(() => false) + // Step 3-key: Key setup + if (await waitVisible('#onboarding-step-3-key', 10_000)) { + await expect(page.locator('#onboarding-next-3-key')).toBeEnabled({ timeout: 30_000 }) + const recoveryBlock = page.locator('#onboarding-recovery-block') + if (await recoveryBlock.isVisible().catch(() => false)) { + await page.locator('#onboarding-recovery-ack').check() + } + await page.locator('#onboarding-next-3-key').click() + await expect(page.locator('#onboarding-step-3-key')).toBeHidden({ timeout: 8000 }) + } - if (onboardingStillVisible) { - const ciFlag = !!process.env.CI || process.env.GITHUB_ACTIONS === 'true' - const maxRetries = 12 - for (let attempt = 1; attempt <= maxRetries; attempt += 1) { - const retryCheck = await page - .evaluate(async (useLongTimeout) => { - const 
invoke = (window as any).__TAURI__?.invoke - if (!invoke) { - return { available: false } - } - const start = Date.now() - try { - const result = await invoke('check_is_onboarded', { - __wsTimeoutMs: useLongTimeout ? 15000 : 5000, - }) - return { available: true, result, durationMs: Date.now() - start } - } catch (err) { - return { available: true, error: String(err), durationMs: Date.now() - start } - } - }, ciFlag) - .catch(() => null) + // Step 4: SyftBox - skip + const step4StartTime = Date.now() + if (await waitVisible('#onboarding-step-4', 30_000)) { + console.log(`${email}: [onboarding] Step 4 visible, clicking skip-syftbox-btn...`) + await Promise.all([ + page.waitForNavigation({ waitUntil: 'networkidle' }).catch(() => {}), + page.locator('#skip-syftbox-btn').click(), + ]) console.log( - `${email}: [onboarding] check_is_onboarded retry ${attempt}/${maxRetries}:`, - JSON.stringify(retryCheck), + `${email}: [onboarding] Navigation complete after ${Date.now() - step4StartTime}ms, waiting for app ready...`, ) - if (retryCheck?.available && retryCheck?.result === true) { - await page.reload({ waitUntil: 'networkidle' }).catch(() => {}) - await waitForAppReady(page, { timeout: 30_000 }) - await expect(page.locator('#run-view')).toBeVisible({ timeout: 30_000 }) - log(logSocket, { event: 'onboarding-complete', email, recovery: 'retry-check' }) - console.log(`${email}: Onboarding complete after retry!`) - return true + } + + console.log(`${email}: [onboarding] Waiting for onboarding check to complete...`) + await page.waitForFunction( + () => + (window as any).__ONBOARDING_CHECK_COMPLETE__ === true && + (window as any).__NAV_HANDLERS_READY__ === true && + (window as any).__EVENT_HANDLERS_READY__ === true, + { timeout: 45_000 }, + ) + console.log( + `${email}: [onboarding] Onboarding check complete after ${Date.now() - step4StartTime}ms`, + ) + + const onboardingStillVisible = await page + .locator('#onboarding-view') + .evaluate((el) => { + return 
el.classList.contains('active') && window.getComputedStyle(el).display !== 'none' + }) + .catch(() => false) + + if (onboardingStillVisible) { + const ciFlag = !!process.env.CI || process.env.GITHUB_ACTIONS === 'true' + const maxRetries = 12 + for (let attempt = 1; attempt <= maxRetries; attempt += 1) { + const retryCheck = await page + .evaluate(async (useLongTimeout) => { + const invoke = (window as any).__TAURI__?.invoke + if (!invoke) { + return { available: false } + } + const start = Date.now() + try { + const result = await invoke('check_is_onboarded', { + __wsTimeoutMs: useLongTimeout ? 15000 : 5000, + }) + return { available: true, result, durationMs: Date.now() - start } + } catch (err) { + return { available: true, error: String(err), durationMs: Date.now() - start } + } + }, ciFlag) + .catch(() => null) + console.log( + `${email}: [onboarding] check_is_onboarded retry ${attempt}/${maxRetries}:`, + JSON.stringify(retryCheck), + ) + if (retryCheck?.available && retryCheck?.result === true) { + await page.reload({ waitUntil: 'networkidle' }).catch(() => {}) + await waitForAppReady(page, { timeout: 30_000 }) + await expect(page.locator('#run-view')).toBeVisible({ timeout: 30_000 }) + log(logSocket, { event: 'onboarding-complete', email, recovery: 'retry-check' }) + console.log(`${email}: Onboarding complete after retry!`) + return true + } + await page.waitForTimeout(5000) } - await page.waitForTimeout(5000) + + const diag = await page + .evaluate(() => ({ + onboardingCheckComplete: (window as any).__ONBOARDING_CHECK_COMPLETE__, + navReady: (window as any).__NAV_HANDLERS_READY__, + eventReady: (window as any).__EVENT_HANDLERS_READY__, + lastOnboardingCheck: (window as any).__LAST_ONBOARDING_CHECK__, + ci: (window as any).__IS_CI__ || null, + envCI: (window as any).process?.env?.CI || null, + envGithubActions: (window as any).process?.env?.GITHUB_ACTIONS || null, + url: window.location.href, + })) + .catch(() => null) + console.log(`${email}: [onboarding] 
Page state:`, JSON.stringify(diag)) + throw new Error( + `Onboarding still visible after complete_onboarding and page reload. ` + + `This usually means check_is_onboarded returned false. ` + + `Page state: ${JSON.stringify(diag)}`, + ) } - // Get diagnostic info from the page's console output - const diag = await page - .evaluate(() => ({ - onboardingCheckComplete: (window as any).__ONBOARDING_CHECK_COMPLETE__, - navReady: (window as any).__NAV_HANDLERS_READY__, - eventReady: (window as any).__EVENT_HANDLERS_READY__, - lastOnboardingCheck: (window as any).__LAST_ONBOARDING_CHECK__, - ci: (window as any).__IS_CI__ || null, - envCI: (window as any).process?.env?.CI || null, - envGithubActions: (window as any).process?.env?.GITHUB_ACTIONS || null, - url: window.location.href, - })) - .catch(() => null) - console.log(`${email}: [onboarding] Page state:`, JSON.stringify(diag)) - throw new Error( - `Onboarding still visible after complete_onboarding and page reload. ` + - `This usually means check_is_onboarded returned false. ` + - `Page state: ${JSON.stringify(diag)}`, + await expect(page.locator('#run-view')).toBeVisible({ timeout: 30_000 }) + console.log( + `${email}: [onboarding] run-view visible after ${Date.now() - step4StartTime}ms total`, + ) + log(logSocket, { event: 'onboarding-complete', email }) + console.log(`${email}: Onboarding complete!`) + return true + } catch (err) { + if (pass === 2) throw err + console.log( + `${email}: [onboarding] pass ${pass} failed; reloading and retrying once. 
error=${String(err)}`, ) + await page.reload({ waitUntil: 'networkidle' }).catch(() => {}) + await waitForAppReady(page, { timeout: 30_000 }).catch(() => {}) } - - await expect(page.locator('#run-view')).toBeVisible({ timeout: 30_000 }) - console.log( - `${email}: [onboarding] run-view visible after ${Date.now() - step4StartTime}ms total`, - ) - log(logSocket, { event: 'onboarding-complete', email }) - console.log(`${email}: Onboarding complete!`) - return true // Onboarding was performed - } finally { - // Clean up the dialog handler - page.off('dialog', dialogHandler) } + + throw new Error(`${email}: onboarding exhausted retries`) } /** diff --git a/tests/ui/playwright-fixtures.ts b/tests/ui/playwright-fixtures.ts index 59c5587a..28983886 100644 --- a/tests/ui/playwright-fixtures.ts +++ b/tests/ui/playwright-fixtures.ts @@ -5,7 +5,10 @@ const pausedTests = new Set() let warnedNoTty = false function shouldPauseInteractive() { - return process.env.INTERACTIVE_MODE === '1' + if (process.env.INTERACTIVE_MODE !== '1') return false + const raw = (process.env.PLAYWRIGHT_INTERACTIVE_PAUSE ?? '').trim().toLowerCase() + if (raw === '0' || raw === 'false' || raw === 'no') return false + return true } async function promptForEnter(message: string) { diff --git a/tests/ui/syqure-multiparty-allele-freq.spec.ts b/tests/ui/syqure-multiparty-allele-freq.spec.ts new file mode 100644 index 00000000..31845759 --- /dev/null +++ b/tests/ui/syqure-multiparty-allele-freq.spec.ts @@ -0,0 +1,1485 @@ +/** + * Syqure Multiparty Flow Test (Three Clients) + * Uses the same invitation system as --pipelines-multiparty-flow, but executes + * the real syqure flow from biovault/flows/multiparty-allele-freq/flow.yaml. 
+ * + * Usage: + * ./test-scenario.sh --syqure-multiparty-allele-freq --interactive + * + * @tag syqure-multiparty-allele-freq + */ +import { expect, test, type Page } from './playwright-fixtures' +import WebSocket from 'ws' +import * as fs from 'node:fs' +import * as path from 'node:path' +import { setWsPort, completeOnboarding, ensureLogSocket, log } from './onboarding-helper.js' + +const TEST_TIMEOUT = 1_800_000 // 30 minutes (syqure runtime can take time) +const UI_TIMEOUT = 20_000 +const SYNC_INTERVAL = 1000 +const MESSAGE_TIMEOUT = 180_000 +const RUN_TIMEOUT_MS = Number.parseInt( + process.env.SYQURE_MULTIPARTY_RUN_TIMEOUT_MS || '1200000', + 10, +) +const ALLELE_FREQ_EXPECTED_FILES = Number.parseInt(process.env.ALLELE_FREQ_COUNT || '1', 10) + +test.describe.configure({ timeout: TEST_TIMEOUT }) + +interface Backend { + invoke: (cmd: string, args?: Record, timeoutMs?: number) => Promise + close: () => Promise +} + +async function connectBackend(port: number): Promise { + const socket = new WebSocket(`ws://localhost:${port}`) + await new Promise((resolve, reject) => { + const timeout = setTimeout( + () => reject(new Error(`WS connect timeout on port ${port}`)), + 10_000, + ) + socket.once('open', () => { + clearTimeout(timeout) + resolve() + }) + socket.once('error', (err) => { + clearTimeout(timeout) + reject(err) + }) + }) + + let nextId = 0 + const pending = new Map void; reject: (e: any) => void }>() + + socket.on('message', (data) => { + let parsed: any + try { + parsed = JSON.parse(data.toString()) + } catch { + return + } + const entry = pending.get(parsed?.id) + if (!entry) return + pending.delete(parsed.id) + if (parsed.error) entry.reject(new Error(parsed.error)) + else entry.resolve(parsed.result) + }) + + function invoke(cmd: string, args: Record = {}, timeoutMs = 30_000) { + const id = ++nextId + socket.send(JSON.stringify({ id, cmd, args })) + return new Promise((resolve, reject) => { + pending.set(id, { resolve, reject }) + setTimeout(() => { + if 
(!pending.has(id)) return + pending.delete(id) + reject(new Error(`WS invoke timeout: ${cmd}`)) + }, timeoutMs) + }) + } + + async function close() { + if (socket.readyState !== WebSocket.OPEN) return + await new Promise((resolve) => { + socket.once('close', () => resolve()) + socket.close() + }) + } + + return { invoke, close } +} + +function resolveDatasitesRoot(dataDir: string): string { + return path.basename(dataDir) === 'datasites' ? dataDir : path.join(dataDir, 'datasites') +} + +async function getSyftboxDataDir(backend: Backend): Promise { + const info = await backend.invoke('get_syftbox_config_info') + const dataDir = info?.data_dir + if (!dataDir || typeof dataDir !== 'string') { + throw new Error('WS bridge did not return a usable data_dir (get_syftbox_config_info)') + } + return dataDir +} + +function didBundlePath(viewerDataDir: string, identity: string): string { + return path.join(resolveDatasitesRoot(viewerDataDir), identity, 'public', 'crypto', 'did.json') +} + +function normalizeMetadata(metadata: any): any { + if (!metadata) return null + if (typeof metadata === 'string') { + try { + return JSON.parse(metadata) + } catch { + return null + } + } + return metadata +} + +async function waitForThreadMessageMatching( + backend: Backend, + threadId: string, + predicate: (msg: any) => boolean, + label: string, + timeoutMs = MESSAGE_TIMEOUT, +): Promise { + const start = Date.now() + while (Date.now() - start < timeoutMs) { + try { + await backend.invoke('sync_messages_with_failures') + } catch { + // Ignore transient sync failures while polling. + } + const msgs = await backend.invoke('get_thread_messages', { threadId }).catch(() => []) + const found = Array.isArray(msgs) ? 
msgs.find((msg: any) => predicate(msg)) : null + if (found) return found + await backend.invoke('trigger_syftbox_sync').catch(() => {}) + await new Promise((r) => setTimeout(r, SYNC_INTERVAL)) + } + throw new Error(`Timed out waiting for thread message: ${label}`) +} + +async function runMultiRecipientCryptoSmoke( + participants: Array<{ email: string; backend: Backend }>, +): Promise { + const smokeTag = `crypto-smoke-${Date.now()}` + console.log(`--- Multi-recipient encryption smoke: ${smokeTag} ---`) + + const sendCases = [ + { from: participants[0], to: [participants[1], participants[2]] }, + { from: participants[1], to: [participants[0], participants[2]] }, + { from: participants[2], to: [participants[0], participants[1]] }, + ] + + for (const sendCase of sendCases) { + const recipientEmails = sendCase.to.map((entry) => entry.email) + const body = `[${smokeTag}] ${sendCase.from.email} -> ${recipientEmails.join(', ')}` + const sent = await sendMessageWithRetry(sendCase.from.backend, { + recipients: recipientEmails, + subject: `Crypto smoke ${smokeTag}`, + body, + metadata: { + crypto_smoke: { + tag: smokeTag, + sender: sendCase.from.email, + recipients: recipientEmails, + }, + }, + }) + const threadId = sent?.thread_id + expect(typeof threadId).toBe('string') + console.log( + ` Sent smoke message: ${sendCase.from.email} -> ${recipientEmails.join(', ')} (thread ${threadId})`, + ) + + await waitForThreadMessageMatching( + sendCase.from.backend, + threadId, + (msg) => String(msg?.body || '').includes(body), + `sender sees smoke message (${sendCase.from.email})`, + ) + + for (const recipient of sendCase.to) { + const received = await waitForThreadMessageMatching( + recipient.backend, + threadId, + (msg) => String(msg?.body || '').includes(body), + `${recipient.email} receives/decrypts smoke message`, + ) + const metadata = normalizeMetadata(received?.metadata) + expect(metadata?.crypto_smoke?.tag).toBe(smokeTag) + } + } + + console.log('Multi-recipient encryption 
smoke passed for all sender/recipient pairs') +} + +async function sendMessageWithRetry( + backend: Backend, + request: Record, + maxAttempts = 8, +): Promise { + let lastError: unknown = null + for (let attempt = 1; attempt <= maxAttempts; attempt += 1) { + try { + return await backend.invoke('send_message', { request }, 120_000) + } catch (error) { + lastError = error + const message = String(error || '') + const isDbLock = /database is locked/i.test(message) + if (!isDbLock || attempt === maxAttempts) break + await backend.invoke('trigger_syftbox_sync').catch(() => {}) + await new Promise((r) => setTimeout(r, 400 * attempt)) + } + } + throw lastError instanceof Error + ? lastError + : new Error(String(lastError || 'send_message failed')) +} + +async function waitForContactImport( + backend: Backend, + identity: string, + timeoutMs = 120_000, +): Promise { + const start = Date.now() + let lastError = '' + while (Date.now() - start < timeoutMs) { + try { + await backend.invoke('network_import_contact', { identity }) + return + } catch (error) { + lastError = String(error) + } + await backend.invoke('trigger_syftbox_sync').catch(() => {}) + await new Promise((r) => setTimeout(r, SYNC_INTERVAL)) + } + throw new Error(`Timed out waiting for DID/contact import for ${identity}: ${lastError}`) +} + +async function waitForDidBundleOnViewer( + label: string, + viewerBackend: Backend, + viewerDataDir: string, + identity: string, + allBackends: Backend[], + timeoutMs = 120_000, +): Promise { + const start = Date.now() + const didPath = didBundlePath(viewerDataDir, identity) + while (Date.now() - start < timeoutMs) { + await Promise.all( + allBackends.map((backend) => backend.invoke('trigger_syftbox_sync').catch(() => {})), + ) + if (fs.existsSync(didPath)) return + await viewerBackend.invoke('trigger_syftbox_sync').catch(() => {}) + await new Promise((r) => setTimeout(r, SYNC_INTERVAL)) + } + throw new Error(`Timed out waiting for DID bundle (${label}): ${didPath}`) +} + 
+async function clickMessagesTab(page: Page): Promise { + const navTab = page.locator('.nav-item[data-tab="messages"]').first() + if (await navTab.isVisible().catch(() => false)) { + await navTab.click() + return + } + await page.locator('button:has-text("Messages")').first().click() +} + +async function clickRunsTab(page: Page): Promise { + const navTab = page.locator('.nav-item[data-tab="runs"]').first() + if (await navTab.isVisible().catch(() => false)) { + await navTab.click() + return + } + await page.locator('button:has-text("Runs")').first().click() +} + +async function importGeneratedAlleleFreqFiles( + backend: Backend, + label: string, + expectedCount: number, +): Promise { + if (expectedCount <= 0) return + const dataDir = await getSyftboxDataDir(backend) + const datasitesRoot = resolveDatasitesRoot(dataDir) + const homeDir = path.dirname(datasitesRoot) + const genotypeDir = path.join(homeDir, 'private', 'app_data', 'biovault', 'allele-freq-data') + const samplesheetPath = path.join(genotypeDir, 'samplesheet.csv') + let files: string[] = [] + if (fs.existsSync(samplesheetPath)) { + const rows = fs.readFileSync(samplesheetPath, 'utf8').split(/\r?\n/).filter(Boolean) + files = rows + .slice(1) + .map((row) => row.split(',')[1]?.trim()) + .filter((filePath): filePath is string => Boolean(filePath)) + } + if (files.length === 0) { + const genotypesDir = path.join(genotypeDir, 'genotypes') + if (fs.existsSync(genotypesDir)) { + files = fs + .readdirSync(genotypesDir, { withFileTypes: true }) + .filter((entry) => entry.isFile() && entry.name.endsWith('.txt')) + .map((entry) => path.join(genotypesDir, entry.name)) + } + } + files.sort() + const selected = files.slice(0, expectedCount) + if (selected.length < expectedCount) { + throw new Error( + `${label}: expected ${expectedCount} genotype files in ${genotypeDir}, found ${selected.length}`, + ) + } + const fileMetadata = Object.fromEntries( + selected.map((filePath, idx) => [ + filePath, + { + data_type: 
'Genotype', + source: '23andMe', + grch_version: 'GRCh38', + participant_id: `${label.replace(/[^a-z0-9]/gi, '_')}_${idx + 1}`, + }, + ]), + ) + await backend.invoke('import_files_pending', { fileMetadata }, 120_000) + console.log(`${label}: imported ${selected.length} generated genotype files`) +} + +async function clickStepActionButton( + page: Page, + stepId: string, + buttonClass: string, + label: string, + timeoutMs = UI_TIMEOUT, +): Promise { + const startedAt = Date.now() + let lastError = '' + while (Date.now() - startedAt < timeoutMs) { + try { + await clickRunsTab(page) + const openAllBtn = page + .locator('.mp-progress-actions .mp-collapse-btn:has-text("Open All")') + .first() + if (await openAllBtn.isVisible().catch(() => false)) { + await openAllBtn.click().catch(() => {}) + } + + const step = page.locator(`.mp-step[data-step-id="${stepId}"]`).first() + await expect(step).toBeVisible({ timeout: 3_000 }) + const actionBtn = step.locator(`button.${buttonClass}`).first() + await expect(actionBtn).toBeVisible({ timeout: 3_000 }) + await expect(actionBtn).toBeEnabled({ timeout: 3_000 }) + await actionBtn.click() + console.log(`${label}: clicked ${buttonClass} for ${stepId}`) + return + } catch (error) { + lastError = String(error) + await page.waitForTimeout(1_000) + } + } + + throw new Error(`Timed out clicking ${buttonClass} for ${stepId} (${label}): ${lastError}`) +} + +async function waitForLocalStepStatus( + backend: Backend, + sessionId: string, + stepId: string, + expectedStatuses: string[], + label: string, + timeoutMs = RUN_TIMEOUT_MS, +): Promise { + const startedAt = Date.now() + let lastStatus = 'unknown' + let lastError = '' + while (Date.now() - startedAt < timeoutMs) { + try { + const state = await backend.invoke('get_multiparty_flow_state', { sessionId }, 120_000) + const step = (state?.steps || []).find((entry: any) => entry?.id === stepId) + const status = step?.status ? 
String(step.status) : '' + if (status) { + lastStatus = status + if (expectedStatuses.includes(status)) return + if (status === 'Failed') { + const stepLogs = await backend + .invoke('get_multiparty_step_logs', { sessionId, stepId, lines: 240 }) + .catch(() => '') + throw new Error( + `${label}: step "${stepId}" entered Failed state.\n${String(stepLogs || '')}`, + ) + } + } + } catch (error) { + lastError = String(error) + } + await backend.invoke('trigger_syftbox_sync').catch(() => {}) + await new Promise((r) => setTimeout(r, 1200)) + } + throw new Error( + `${label}: timed out waiting for step "${stepId}" statuses [${expectedStatuses.join(', ')}] (last=${lastStatus})` + + (lastError ? `\nLast error: ${lastError}` : ''), + ) +} + +async function waitForSessionRunId( + backend: Backend, + sessionId: string, + label: string, + timeoutMs = RUN_TIMEOUT_MS, +): Promise { + const startedAt = Date.now() + let lastRunId = 0 + let lastError = '' + while (Date.now() - startedAt < timeoutMs) { + try { + const state = await backend.invoke('get_multiparty_flow_state', { sessionId }, 120_000) + const runId = Number(state?.run_id || 0) + if (runId > 0) return runId + lastRunId = runId + } catch (error) { + lastError = String(error) + } + await backend.invoke('trigger_syftbox_sync').catch(() => {}) + await new Promise((r) => setTimeout(r, 1200)) + } + throw new Error( + `${label}: timed out waiting for multiparty run_id > 0 (last=${lastRunId})` + + (lastError ? 
`\nLast error: ${lastError}` : ''), + ) +} + +async function clickStepActionAndWait( + page: Page, + backend: Backend, + sessionId: string, + stepId: string, + buttonClass: string, + label: string, + expectedStatuses: string[], + timeoutMs = RUN_TIMEOUT_MS, +): Promise { + await clickStepActionButton(page, stepId, buttonClass, label, timeoutMs) + await waitForLocalStepStatus(backend, sessionId, stepId, expectedStatuses, label, timeoutMs) +} + +async function runStepViaBackendAndWait( + backend: Backend, + sessionId: string, + stepId: string, + label: string, + expectedStatuses: string[], + timeoutMs = RUN_TIMEOUT_MS, +): Promise { + const rpcTimeoutMs = Math.max(120_000, Math.min(timeoutMs, 600_000)) + try { + await backend.invoke('run_flow_step', { sessionId, stepId }, rpcTimeoutMs) + console.log(`${label}: backend started ${stepId}`) + } catch (error) { + const message = String(error || '') + if ( + /WS invoke timeout: run_flow_step/i.test(message) || + /step is not ready to run \(status:\s*(completed|shared|running)\)/i.test(message) + ) { + console.log(`${label}: backend run_flow_step transient for ${stepId}: ${message}`) + } else { + throw error + } + } + await waitForLocalStepStatus(backend, sessionId, stepId, expectedStatuses, label, timeoutMs) +} + +async function runStepViaBackendWhenReadyAndWait( + backend: Backend, + sessionId: string, + stepId: string, + label: string, + expectedStatuses: string[], + timeoutMs = RUN_TIMEOUT_MS, +): Promise { + const startedAt = Date.now() + let lastError = '' + const rpcTimeoutMs = Math.max(120_000, Math.min(timeoutMs, 600_000)) + const transientStartError = (message: string): boolean => + /dependency .* not satisfied yet/i.test(message) || + /step is not ready to run \(status:\s*waitingforinputs\)/i.test(message) || + /step is not ready to run \(status:\s*waitingfordependencies\)/i.test(message) || + /step is not ready to run \(status:\s*failed\)/i.test(message) || + /Shell workflow exited with code/i.test(message) || 
+ /WS invoke timeout: run_flow_step/i.test(message) + while (Date.now() - startedAt < timeoutMs) { + try { + await backend.invoke('run_flow_step', { sessionId, stepId }, rpcTimeoutMs) + console.log(`${label}: backend started ${stepId}`) + await waitForLocalStepStatus(backend, sessionId, stepId, expectedStatuses, label, timeoutMs) + return + } catch (error) { + lastError = String(error || '') + if (/step is not ready to run \(status:\s*(completed|shared|running)\)/i.test(lastError)) { + await waitForLocalStepStatus(backend, sessionId, stepId, expectedStatuses, label, timeoutMs) + return + } + // Common transients: dependency/input readiness lags while participants sync. + if (!transientStartError(lastError)) { + throw error + } + } + await backend.invoke('trigger_syftbox_sync').catch(() => {}) + await new Promise((r) => setTimeout(r, 1500)) + } + throw new Error( + `${label}: timed out waiting to start ${stepId} after dependency checks` + + (lastError ? `\nLast error: ${lastError}` : ''), + ) +} + +async function shareStepViaBackendAndWait( + backend: Backend, + sessionId: string, + stepId: string, + label: string, + timeoutMs = RUN_TIMEOUT_MS, +): Promise { + const rpcTimeoutMs = Math.max(120_000, Math.min(timeoutMs, 600_000)) + try { + await backend.invoke('share_step_outputs', { sessionId, stepId }, rpcTimeoutMs) + console.log(`${label}: backend shared ${stepId}`) + } catch (error) { + const message = String(error || '') + if (/WS invoke timeout: share_step_outputs/i.test(message)) { + console.log(`${label}: backend share_step_outputs transient for ${stepId}: ${message}`) + } else { + throw error + } + } + await waitForLocalStepStatus(backend, sessionId, stepId, ['Shared'], label, timeoutMs) +} + +async function importAndJoinInvitation( + page: Page, + backend: Backend, + label: string, + flowName: string, + genotypeFileCount = 0, +): Promise { + const start = Date.now() + while (Date.now() - start < MESSAGE_TIMEOUT) { + await clickMessagesTab(page) + await 
backend.invoke('sync_messages_with_failures').catch(() => {}) + await backend.invoke('trigger_syftbox_sync').catch(() => {}) + + const refreshBtn = page.locator('#refresh-messages-btn').first() + if (await refreshBtn.isVisible().catch(() => false)) { + await refreshBtn.click().catch(() => {}) + await page.waitForTimeout(500) + } + + const threadBySubject = page + .locator(`.message-thread-item:has-text("Multiparty Flow: ${flowName}")`) + .first() + if (await threadBySubject.isVisible().catch(() => false)) { + await threadBySubject.click() + } else { + const firstThread = page.locator('.message-thread-item').first() + if (await firstThread.isVisible().catch(() => false)) { + await firstThread.click() + } + } + + const invitationCard = page.locator('.flow-invitation-card').first() + if (await invitationCard.isVisible().catch(() => false)) { + const importBtn = invitationCard.locator( + '.flow-invitation-btn.import-btn, button:has-text("Import Flow")', + ) + const joinBtn = invitationCard.locator( + '.flow-invitation-btn.view-runs-btn, button:has-text("Join Flow"), button:has-text("View Flow")', + ) + + if (await importBtn.isVisible({ timeout: 1500 }).catch(() => false)) { + await importBtn.click() + await page.waitForTimeout(1200) + } + + if (await joinBtn.isVisible({ timeout: 1500 }).catch(() => false)) { + const joinText = (await joinBtn.textContent().catch(() => '')) || '' + if (joinText.includes('View Flow')) { + console.log(`${label}: already joined`) + return + } + await expect(joinBtn).toBeEnabled({ timeout: UI_TIMEOUT }) + await joinBtn.click() + const inputPicker = page.locator('.flow-input-picker-modal').first() + let pickerVisible = false + let alreadyJoined = false + const waitStart = Date.now() + while (Date.now() - waitStart < 20_000) { + if (await inputPicker.isVisible().catch(() => false)) { + pickerVisible = true + break + } + const refreshedJoinText = (await joinBtn.textContent().catch(() => '')) || '' + if (refreshedJoinText.includes('View Flow')) 
{ + alreadyJoined = true + break + } + await page.waitForTimeout(300) + } + if (pickerVisible) { + const genotypeRow = inputPicker + .locator('.flow-input-picker-row') + .filter({ + has: page.locator('.flow-input-picker-label', { hasText: 'genotype_files' }), + }) + .first() + const checkboxes = genotypeRow.locator('input.flow-input-picker-checkbox') + const checkboxCount = await checkboxes.count() + if (genotypeFileCount > 0 && checkboxCount > 0) { + if (checkboxCount < genotypeFileCount) { + throw new Error( + `${label}: expected at least ${genotypeFileCount} genotype files in picker, found ${checkboxCount}`, + ) + } + // Fast-path: use row-level "Select all visible" when present. + const selectAllVisible = genotypeRow + .locator('input.flow-input-picker-select-all') + .first() + if (await selectAllVisible.isVisible().catch(() => false)) { + await selectAllVisible.check() + } + let checkedCount = await checkboxes + .evaluateAll( + (nodes) => nodes.filter((node) => (node as HTMLInputElement).checked).length, + ) + .catch(() => 0) + for ( + let idx = checkedCount; + idx < genotypeFileCount; + idx += 1 // fallback for cases where not all were visible + ) { + await checkboxes.nth(idx).check() + } + checkedCount = await checkboxes + .evaluateAll( + (nodes) => nodes.filter((node) => (node as HTMLInputElement).checked).length, + ) + .catch(() => 0) + if (checkedCount < genotypeFileCount) { + throw new Error( + `${label}: selected ${checkedCount}/${genotypeFileCount} genotype files before Continue`, + ) + } + } else { + const select = genotypeRow.locator('select.flow-input-picker-select').first() + if (genotypeFileCount > 0 && (await select.isVisible().catch(() => false))) { + const allValues = await select.evaluate((node) => { + const options = Array.from((node as HTMLSelectElement).options) + return options.map((option) => option.value).filter(Boolean) + }) + if (allValues.length < genotypeFileCount) { + throw new Error( + `${label}: expected at least 
${genotypeFileCount} genotype files in picker, found ${allValues.length}`, + ) + } + const picked = allValues.slice(0, genotypeFileCount) + await select.evaluate((node, values) => { + const wanted = new Set(values) + const input = node as HTMLSelectElement + for (const option of Array.from(input.options)) { + option.selected = wanted.has(option.value) + } + input.dispatchEvent(new Event('change', { bubbles: true })) + }, picked) + } + } + const continueBtn = inputPicker.locator('button.flow-input-picker-confirm').first() + await expect(continueBtn).toBeVisible({ timeout: 15_000 }) + await expect(continueBtn).toBeEnabled({ timeout: 15_000 }) + let submitted = false + for (let attempt = 0; attempt < 4; attempt += 1) { + await continueBtn.click({ force: true }) + const closed = await inputPicker.isHidden({ timeout: 5_000 }).catch(() => false) + if (closed) { + submitted = true + break + } + await page.waitForTimeout(500) + } + if (!submitted) { + throw new Error( + `${label}: configure-flow input modal did not close after pressing Continue`, + ) + } + } else { + if (genotypeFileCount > 0) { + throw new Error( + `${label}: Configure flow inputs modal did not appear for genotype participant`, + ) + } + if (!alreadyJoined) { + throw new Error( + `${label}: Configure flow inputs modal did not appear after clicking Join Flow`, + ) + } + } + console.log(`${label}: joined invitation flow`) + return + } + } + + await page.waitForTimeout(SYNC_INTERVAL) + } + + throw new Error(`${label}: timed out waiting for flow invitation card`) +} + +async function waitForRunStatus( + backend: Backend, + runId: number, + expectedStatuses: string[], + timeoutMs = RUN_TIMEOUT_MS, + label = 'run', +): Promise { + const startTime = Date.now() + let lastStatus = 'unknown' + let lastPollError = '' + let consecutivePollErrors = 0 + while (Date.now() - startTime < timeoutMs) { + let runs: any[] = [] + try { + // get_flow_runs can be slow while Syqure compute is active; allow a longer WS timeout. 
+ runs = await backend.invoke('get_flow_runs', {}, 120_000) + consecutivePollErrors = 0 + } catch (error) { + lastPollError = String(error) + consecutivePollErrors += 1 + if (consecutivePollErrors === 1 || consecutivePollErrors % 10 === 0) { + console.warn( + `${label}: get_flow_runs poll error (${consecutivePollErrors}): ${lastPollError}`, + ) + } + await backend.invoke('trigger_syftbox_sync').catch(() => {}) + await new Promise((r) => setTimeout(r, 2_000)) + continue + } + const run = (runs || []).find((r: any) => r.id === runId) + if (run?.status && run.status !== lastStatus) { + lastStatus = run.status + console.log(`${label}: run ${runId} status -> ${lastStatus}`) + } + if (run && expectedStatuses.includes(run.status)) { + return run + } + await backend.invoke('trigger_syftbox_sync').catch(() => {}) + await new Promise((r) => setTimeout(r, 2_000)) + } + let logTail = '' + try { + logTail = String((await backend.invoke('get_flow_run_logs_tail', { runId, lines: 200 })) || '') + } catch { + // Ignore diagnostic failures and surface the timeout. + } + throw new Error( + `Timed out waiting for run ${runId} status: ${expectedStatuses.join(', ')} (last=${lastStatus})` + + (lastPollError ? `\nLast poll error: ${lastPollError}` : '') + + (logTail ? `\nLast log tail:\n${logTail}` : ''), + ) +} + +function collectMatchingFiles(rootDir: string, filename: string): string[] { + if (!rootDir || !fs.existsSync(rootDir)) return [] + const matches: string[] = [] + const stack = [rootDir] + while (stack.length > 0) { + const current = stack.pop()! 
+ for (const entry of fs.readdirSync(current, { withFileTypes: true })) { + const fullPath = path.join(current, entry.name) + if (entry.isDirectory()) { + stack.push(fullPath) + } else if (entry.isFile() && entry.name === filename) { + matches.push(fullPath) + } + } + } + return matches +} + +function assertSharedRunDirExists( + dataDir: string, + ownerEmail: string, + flowName: string, + runId: string, +) { + const datasitesRoot = resolveDatasitesRoot(dataDir) + const runDir = path.join(datasitesRoot, ownerEmail, 'shared', 'flows', flowName, runId) + expect(fs.existsSync(runDir)).toBe(true) + + const hasProgressDir = + fs.existsSync(path.join(runDir, '_progress')) || fs.existsSync(path.join(runDir, 'progress')) + expect(hasProgressDir).toBe(true) +} + +function getSharedRunDir( + dataDir: string, + ownerEmail: string, + flowName: string, + runId: string, +): string { + return path.join(resolveDatasitesRoot(dataDir), ownerEmail, 'shared', 'flows', flowName, runId) +} + +function getSharedStepDirCandidates(runDir: string, stepNumber: number, stepId: string): string[] { + return [ + path.join(runDir, `${stepNumber}-${stepId}`), + path.join(runDir, `${String(stepNumber).padStart(2, '0')}-${stepId}`), + ] +} + +function findExistingSharedStepDir( + runDir: string, + stepNumber: number, + stepId: string, +): string | null { + for (const candidate of getSharedStepDirCandidates(runDir, stepNumber, stepId)) { + if (fs.existsSync(candidate)) return candidate + } + return null +} + +async function waitForCondition( + check: () => boolean, + label: string, + timeoutMs = MESSAGE_TIMEOUT, + pollMs = 1000, +): Promise { + const startedAt = Date.now() + while (Date.now() - startedAt < timeoutMs) { + if (check()) return + await new Promise((resolve) => setTimeout(resolve, pollMs)) + } + throw new Error(`Timed out waiting for condition: ${label}`) +} + +async function waitForSharedFileOnViewers( + participantDataDirs: Map, + ownerEmail: string, + flowName: string, + runId: string, 
+ stepNumber: number, + stepId: string, + fileName: string, + requiredViewerEmails: string[], + timeoutMs = MESSAGE_TIMEOUT, +): Promise { + await waitForCondition( + () => + requiredViewerEmails.every((viewerEmail) => { + const viewerDataDir = participantDataDirs.get(viewerEmail) + if (!viewerDataDir) return false + const runDir = getSharedRunDir(viewerDataDir, ownerEmail, flowName, runId) + const stepDir = findExistingSharedStepDir(runDir, stepNumber, stepId) + if (!stepDir) return false + return fs.existsSync(path.join(stepDir, fileName)) + }), + `${ownerEmail}/${stepId}/${fileName} visible on ${requiredViewerEmails.join(', ')}`, + timeoutMs, + ) +} + +async function assertBuildMasterOutputsNonEmpty( + participantDataDirs: Map, + aggregatorEmail: string, + flowName: string, + runId: string, + timeoutMs = 90_000, +): Promise { + const aggregatorDataDir = participantDataDirs.get(aggregatorEmail) + if (!aggregatorDataDir) { + throw new Error(`Missing data dir for aggregator ${aggregatorEmail}`) + } + const runDir = getSharedRunDir(aggregatorDataDir, aggregatorEmail, flowName, runId) + const stepId = 'build_master' + const stepNumber = 3 + + await waitForCondition( + () => { + const stepDir = findExistingSharedStepDir(runDir, stepNumber, stepId) + if (!stepDir) return false + const unionPath = path.join(stepDir, 'union_locus_index.json') + const countPath = path.join(stepDir, 'count.txt') + if (!fs.existsSync(unionPath) || !fs.existsSync(countPath)) return false + try { + const union = JSON.parse(fs.readFileSync(unionPath, 'utf8')) + const loci = Array.isArray(union?.loci) ? 
union.loci.length : 0 + const nLoci = Number(union?.n_loci || 0) + const countRaw = fs.readFileSync(countPath, 'utf8').trim() + const count = Number.parseInt(countRaw || '0', 10) + return Number.isFinite(nLoci) && nLoci > 0 && loci > 0 && Number.isFinite(count) && count > 0 + } catch { + return false + } + }, + `build_master produced non-empty union index and count for ${aggregatorEmail}`, + timeoutMs, + 1200, + ) + + const stepDir = findExistingSharedStepDir(runDir, stepNumber, stepId) + if (!stepDir) { + throw new Error(`Missing build_master step directory in ${runDir}`) + } + const unionPath = path.join(stepDir, 'union_locus_index.json') + const countPath = path.join(stepDir, 'count.txt') + const unionRaw = fs.existsSync(unionPath) ? fs.readFileSync(unionPath, 'utf8') : '' + const countRaw = fs.existsSync(countPath) ? fs.readFileSync(countPath, 'utf8') : '' + const union = JSON.parse(unionRaw) + const loci = Array.isArray(union?.loci) ? union.loci.length : 0 + const nLoci = Number(union?.n_loci || 0) + const count = Number.parseInt(String(countRaw).trim() || '0', 10) + expect(nLoci).toBeGreaterThan(0) + expect(loci).toBeGreaterThan(0) + expect(count).toBeGreaterThan(0) +} + +function findParticipantStepStatus( + allProgress: any[], + participantEmail: string, + stepId: string, +): string | null { + const participant = (allProgress || []).find((entry) => entry?.email === participantEmail) + if (!participant) return null + const step = (participant.steps || []).find((entry: any) => entry?.step_id === stepId) + return step?.status || null +} + +async function waitForProgressConvergence( + viewers: Array<{ label: string; backend: Backend }>, + sessionId: string, + expectedStatuses: Array<{ email: string; stepId: string; statuses: string[] }>, + timeoutMs = MESSAGE_TIMEOUT, +): Promise { + const startedAt = Date.now() + while (Date.now() - startedAt < timeoutMs) { + let allSatisfied = true + + for (const viewer of viewers) { + await 
viewer.backend.invoke('trigger_syftbox_sync').catch(() => {}) + const allProgress = await viewer.backend + .invoke('get_all_participant_progress', { sessionId }) + .catch(() => []) + for (const expected of expectedStatuses) { + const status = findParticipantStepStatus(allProgress, expected.email, expected.stepId) + if (!status || !expected.statuses.includes(status)) { + allSatisfied = false + break + } + } + if (!allSatisfied) break + } + + if (allSatisfied) return + await new Promise((resolve) => setTimeout(resolve, 1200)) + } + + throw new Error('Timed out waiting for cross-participant progress convergence') +} + +test.describe('Syqure flow via multiparty invitation system @syqure-multiparty-allele-freq', () => { + test('three clients join via invitation card and execute real syqure flow', async ({ + browser, + }) => { + const wsPortBase = Number.parseInt(process.env.DEV_WS_BRIDGE_PORT_BASE || '3333', 10) + const wsPort1 = wsPortBase + const wsPort2 = wsPortBase + 1 + const wsPort3 = wsPortBase + 2 + + const email1 = process.env.CLIENT1_EMAIL || 'client1@sandbox.local' + const email2 = process.env.CLIENT2_EMAIL || 'client2@sandbox.local' + const email3 = process.env.AGG_EMAIL || 'aggregator@sandbox.local' + + const flowName = 'multiparty-allele-freq' + const sourceFlowPath = path.join( + process.cwd(), + 'biovault', + 'flows', + 'multiparty-allele-freq', + 'flow.yaml', + ) + expect(fs.existsSync(sourceFlowPath)).toBe(true) + + const alleleFreqPipelinePath = path.join( + process.cwd(), + 'biovault', + 'flows', + 'allele-freq', + 'flow.yaml', + ) + expect(fs.existsSync(alleleFreqPipelinePath)).toBe(true) + + let logSocket: WebSocket | null = null + let backend1: Backend | null = null + let backend2: Backend | null = null + let backend3: Backend | null = null + let page1: Page | null = null + let page2: Page | null = null + let page3: Page | null = null + + try { + logSocket = await ensureLogSocket() + log(logSocket, { + event: 'syqure-multiparty-allele-freq-start', 
+ email1, + email2, + email3, + flowName, + }) + + page1 = await browser.newPage() + page2 = await browser.newPage() + page3 = await browser.newPage() + + await setWsPort(page1, wsPort1) + await setWsPort(page2, wsPort2) + await setWsPort(page3, wsPort3) + + backend1 = await connectBackend(wsPort1) + backend2 = await connectBackend(wsPort2) + backend3 = await connectBackend(wsPort3) + + const uiBaseUrl = process.env.UI_BASE_URL || 'http://localhost:8082' + await page1.goto(uiBaseUrl) + await page2.goto(uiBaseUrl) + await page3.goto(uiBaseUrl) + + await completeOnboarding(page1, email1, logSocket) + await completeOnboarding(page2, email2, logSocket) + await completeOnboarding(page3, email3, logSocket) + await importGeneratedAlleleFreqFiles(backend1, email1, ALLELE_FREQ_EXPECTED_FILES) + await importGeneratedAlleleFreqFiles(backend2, email2, ALLELE_FREQ_EXPECTED_FILES) + + await backend1.invoke('get_dev_mode_info') + await backend2.invoke('get_dev_mode_info') + await backend3.invoke('get_dev_mode_info') + + const dataDir1 = await getSyftboxDataDir(backend1) + const dataDir2 = await getSyftboxDataDir(backend2) + const dataDir3 = await getSyftboxDataDir(backend3) + const participantDataDirs = new Map([ + [email1, dataDir1], + [email2, dataDir2], + [email3, dataDir3], + ]) + + const allBackends = [backend1, backend2, backend3] + await Promise.all([ + waitForDidBundleOnViewer(email1, backend1, dataDir1, email2, allBackends), + waitForDidBundleOnViewer(email1, backend1, dataDir1, email3, allBackends), + waitForDidBundleOnViewer(email2, backend2, dataDir2, email1, allBackends), + waitForDidBundleOnViewer(email2, backend2, dataDir2, email3, allBackends), + waitForDidBundleOnViewer(email3, backend3, dataDir3, email1, allBackends), + waitForDidBundleOnViewer(email3, backend3, dataDir3, email2, allBackends), + ]) + + // Pairwise contacts for encrypted invitation delivery. + // Wait/retry until DID bundles are visible across all three clients. 
+ await Promise.all([ + waitForContactImport(backend1, email2), + waitForContactImport(backend1, email3), + waitForContactImport(backend2, email1), + waitForContactImport(backend2, email3), + waitForContactImport(backend3, email1), + waitForContactImport(backend3, email2), + ]) + + await runMultiRecipientCryptoSmoke([ + { email: email1, backend: backend1 }, + { email: email2, backend: backend2 }, + { email: email3, backend: backend3 }, + ]) + + // Import both the syqure multiparty flow and the standalone allele-freq + // pipeline (used by gen_allele_freq module's run.sh via `bv run`). + await Promise.all([ + backend1.invoke('import_flow', { + flowFile: sourceFlowPath, + overwrite: true, + }), + backend2.invoke('import_flow', { + flowFile: sourceFlowPath, + overwrite: true, + }), + backend3.invoke('import_flow', { + flowFile: sourceFlowPath, + overwrite: true, + }), + backend1.invoke('import_flow', { + flowFile: alleleFreqPipelinePath, + overwrite: true, + }), + backend2.invoke('import_flow', { + flowFile: alleleFreqPipelinePath, + overwrite: true, + }), + ]) + + const flowsAgg = await backend3.invoke('get_flows', {}) + const syqureFlowAgg = (flowsAgg || []).find((flow: any) => flow?.name === flowName) + expect(syqureFlowAgg).toBeTruthy() + expect(syqureFlowAgg?.spec).toBeTruthy() + + const sessionId = `session-${Date.now()}` + // Keep runId aligned with multiparty session_id so the shared _progress and step paths + // are observed consistently by collaborative UI/state readers. 
+ const runId = sessionId + const datasites = [email3, email1, email2] + + const flowSpec = { + apiVersion: 'syftbox.openmined.org/v1alpha1', + kind: 'Flow', + metadata: { + name: flowName, + version: syqureFlowAgg?.version || '0.1.0', + }, + spec: syqureFlowAgg.spec, + } + + const participants = [ + { email: email3, role: 'aggregator' }, + { email: email1, role: 'client1' }, + { email: email2, role: 'client2' }, + ] + + await backend3.invoke('send_message', { + request: { + recipients: [email1, email2], + body: `Join collaborative Syqure flow run ${runId}`, + subject: `Multiparty Flow: ${flowName}`, + metadata: { + flow_invitation: { + flow_name: flowName, + session_id: sessionId, + participants, + flow_spec: flowSpec, + }, + }, + }, + }) + + await importAndJoinInvitation(page1, backend1, email1, flowName, ALLELE_FREQ_EXPECTED_FILES) + await importAndJoinInvitation(page2, backend2, email2, flowName, ALLELE_FREQ_EXPECTED_FILES) + await importAndJoinInvitation(page3, backend3, email3, flowName, 0) + + const [runId1, runId2, runId3] = await Promise.all([ + waitForSessionRunId(backend1, sessionId, email1, 90_000), + waitForSessionRunId(backend2, sessionId, email2, 90_000), + waitForSessionRunId(backend3, sessionId, email3, 90_000), + ]) + expect(runId1).toBeGreaterThan(0) + expect(runId2).toBeGreaterThan(0) + expect(runId3).toBeGreaterThan(0) + + const flows1 = await backend1.invoke('get_flows', {}) + const flows2 = await backend2.invoke('get_flows', {}) + const flows3 = await backend3.invoke('get_flows', {}) + const syqureFlow1 = (flows1 || []).find((flow: any) => flow?.name === flowName) + const syqureFlow2 = (flows2 || []).find((flow: any) => flow?.name === flowName) + const syqureFlow3 = (flows3 || []).find((flow: any) => flow?.name === flowName) + expect(syqureFlow1).toBeTruthy() + expect(syqureFlow2).toBeTruthy() + expect(syqureFlow3).toBeTruthy() + // Drive execution through the same UI controls users use (Run/Share per participant window). 
+ await Promise.all([clickRunsTab(page1), clickRunsTab(page2), clickRunsTab(page3)]) + + // Stage 1: clients run gen_allele_freq (local-only output). + // Use backend invocation to avoid flaky UI click timing around step enablement. + await Promise.all([ + runStepViaBackendWhenReadyAndWait(backend1, sessionId, 'gen_allele_freq', email1, [ + 'Completed', + 'Shared', + ]), + runStepViaBackendWhenReadyAndWait(backend2, sessionId, 'gen_allele_freq', email2, [ + 'Completed', + 'Shared', + ]), + ]) + + // Stage 1b: clients share only locus_index derived artifact. + await Promise.all([ + runStepViaBackendWhenReadyAndWait(backend1, sessionId, 'share_locus_index', email1, [ + 'Completed', + 'Shared', + ]), + runStepViaBackendWhenReadyAndWait(backend2, sessionId, 'share_locus_index', email2, [ + 'Completed', + 'Shared', + ]), + ]) + await Promise.all([ + shareStepViaBackendAndWait(backend1, sessionId, 'share_locus_index', email1, 180_000), + shareStepViaBackendAndWait(backend2, sessionId, 'share_locus_index', email2, 180_000), + ]) + // Ensure aggregator can actually see both shared locus-index artifacts before build_master. + await waitForSharedFileOnViewers( + participantDataDirs, + email1, + flowName, + runId, + 2, + 'share_locus_index', + 'locus_index.tsv', + [email3], + ) + await waitForSharedFileOnViewers( + participantDataDirs, + email2, + flowName, + runId, + 2, + 'share_locus_index', + 'locus_index.tsv', + [email3], + ) + + // Stage 2: aggregator run + share build_master. + // Use backend invocation to avoid flaky UI click timing around step enablement. + await runStepViaBackendWhenReadyAndWait(backend3, sessionId, 'build_master', email3, [ + 'Completed', + 'Shared', + ]) + await shareStepViaBackendAndWait(backend3, sessionId, 'build_master', email3, 180_000) + await assertBuildMasterOutputsNonEmpty(participantDataDirs, email3, flowName, runId) + + // Stage 3: clients run align_counts. 
+ await Promise.all([ + runStepViaBackendWhenReadyAndWait(backend1, sessionId, 'align_counts', email1, [ + 'Completed', + 'Shared', + ]), + runStepViaBackendWhenReadyAndWait(backend2, sessionId, 'align_counts', email2, [ + 'Completed', + 'Shared', + ]), + ]) + + // Stage 3b: run explicit barrier after align_counts so downstream deps converge. + await Promise.all([ + runStepViaBackendWhenReadyAndWait(backend1, sessionId, 'mpc_barrier', email1, [ + 'Completed', + 'Shared', + ]), + runStepViaBackendWhenReadyAndWait(backend2, sessionId, 'mpc_barrier', email2, [ + 'Completed', + 'Shared', + ]), + ]) + + // Stage 4: run secure_aggregate via backend commands. + // This avoids flaky UI actions when get_multiparty_flow_state polling is slow. + await Promise.all([ + runStepViaBackendWhenReadyAndWait( + backend1, + sessionId, + 'secure_aggregate', + email1, + ['Running', 'Completed', 'Shared'], + RUN_TIMEOUT_MS, + ), + runStepViaBackendWhenReadyAndWait( + backend2, + sessionId, + 'secure_aggregate', + email2, + ['Running', 'Completed', 'Shared'], + RUN_TIMEOUT_MS, + ), + runStepViaBackendWhenReadyAndWait( + backend3, + sessionId, + 'secure_aggregate', + email3, + ['Running', 'Completed', 'Shared'], + RUN_TIMEOUT_MS, + ), + ]) + + // Stage 4b: all parties share secure_aggregate outputs via backend commands. + await Promise.all([ + shareStepViaBackendAndWait(backend1, sessionId, 'secure_aggregate', email1, RUN_TIMEOUT_MS), + shareStepViaBackendAndWait(backend2, sessionId, 'secure_aggregate', email2, RUN_TIMEOUT_MS), + shareStepViaBackendAndWait(backend3, sessionId, 'secure_aggregate', email3, RUN_TIMEOUT_MS), + ]) + + // Stage 5: clients run report_aggregate. + // Use retrying backend start since this step can race after secure_aggregate share. 
+ await Promise.all([ + runStepViaBackendWhenReadyAndWait(backend1, sessionId, 'report_aggregate', email1, [ + 'Completed', + 'Shared', + ]), + runStepViaBackendWhenReadyAndWait(backend2, sessionId, 'report_aggregate', email2, [ + 'Completed', + 'Shared', + ]), + ]) + + assertSharedRunDirExists(dataDir1, email1, flowName, runId) + assertSharedRunDirExists(dataDir2, email2, flowName, runId) + assertSharedRunDirExists(dataDir3, email3, flowName, runId) + + // Verify each participant can observe converged step statuses from all parties. + const viewers = [ + { label: email1, backend: backend1 }, + { label: email2, backend: backend2 }, + { label: email3, backend: backend3 }, + ] + await waitForProgressConvergence(viewers, sessionId, [ + { email: email1, stepId: 'gen_allele_freq', statuses: ['Completed'] }, + { email: email2, stepId: 'gen_allele_freq', statuses: ['Completed'] }, + { email: email1, stepId: 'share_locus_index', statuses: ['Shared', 'Completed'] }, + { email: email2, stepId: 'share_locus_index', statuses: ['Shared', 'Completed'] }, + { email: email3, stepId: 'build_master', statuses: ['Shared', 'Completed'] }, + { email: email1, stepId: 'align_counts', statuses: ['Completed', 'Shared'] }, + { email: email2, stepId: 'align_counts', statuses: ['Completed', 'Shared'] }, + { email: email1, stepId: 'secure_aggregate', statuses: ['Shared', 'Completed'] }, + { email: email2, stepId: 'secure_aggregate', statuses: ['Shared', 'Completed'] }, + { email: email1, stepId: 'report_aggregate', statuses: ['Completed', 'Shared'] }, + { email: email2, stepId: 'report_aggregate', statuses: ['Completed', 'Shared'] }, + ]) + + const finalRun1 = await waitForRunStatus( + backend1, + runId1, + ['success', 'failed', 'error'], + RUN_TIMEOUT_MS, + email1, + ) + const finalRun2 = await waitForRunStatus( + backend2, + runId2, + ['success', 'failed', 'error'], + RUN_TIMEOUT_MS, + email2, + ) + const finalRun3 = await waitForRunStatus( + backend3, + runId3, + ['success', 'failed', 
'error'], + RUN_TIMEOUT_MS, + email3, + ) + console.log( + `Final run statuses: client1=${finalRun1.status}, client2=${finalRun2.status}, aggregator=${finalRun3.status}`, + ) + expect(finalRun1.status).toBe('success') + expect(finalRun2.status).toBe('success') + expect(finalRun3.status).toBe('success') + + const runRoot1 = finalRun1.results_dir || finalRun1.work_dir + const runRoot2 = finalRun2.results_dir || finalRun2.work_dir + expect(runRoot1 && fs.existsSync(runRoot1)).toBe(true) + expect(runRoot2 && fs.existsSync(runRoot2)).toBe(true) + expect(collectMatchingFiles(runRoot1, 'report.json').length).toBeGreaterThan(0) + expect(collectMatchingFiles(runRoot1, 'report.tsv').length).toBeGreaterThan(0) + expect(collectMatchingFiles(runRoot1, 'aggregated_allele_freq.tsv').length).toBeGreaterThan(0) + expect(collectMatchingFiles(runRoot2, 'report.json').length).toBeGreaterThan(0) + expect(collectMatchingFiles(runRoot2, 'report.tsv').length).toBeGreaterThan(0) + expect(collectMatchingFiles(runRoot2, 'aggregated_allele_freq.tsv').length).toBeGreaterThan(0) + + // Verify _progress state/log files are synced and visible cross-datasite. + for (const ownerEmail of [email1, email2, email3]) { + for (const viewerEmail of [email1, email2, email3]) { + const viewerDataDir = participantDataDirs.get(viewerEmail)! + const runDir = getSharedRunDir(viewerDataDir, ownerEmail, flowName, runId) + await waitForCondition( + () => fs.existsSync(path.join(runDir, '_progress', 'state.json')), + `${viewerEmail} sees ${ownerEmail} _progress/state.json`, + ) + await waitForCondition( + () => + fs.existsSync(path.join(runDir, '_progress', 'log.jsonl')) || + fs.existsSync(path.join(runDir, '_progress', 'progress.json')), + `${viewerEmail} sees ${ownerEmail} _progress log`, + ) + } + } + + // Stage 1 share: clients share locus index with aggregator (no raw allele_freq sharing). 
+ await waitForSharedFileOnViewers( + participantDataDirs, + email1, + flowName, + runId, + 2, + 'share_locus_index', + 'locus_index.tsv', + [email1, email3], + ) + await waitForSharedFileOnViewers( + participantDataDirs, + email2, + flowName, + runId, + 2, + 'share_locus_index', + 'locus_index.tsv', + [email2, email3], + ) + + // Stage 2 share: aggregator shares master list with all. + await waitForSharedFileOnViewers( + participantDataDirs, + email3, + flowName, + runId, + 3, + 'build_master', + 'union_locus_index.json', + [email1, email2, email3], + ) + + // Final secure share: clients share secure_aggregate output back to all. + for (const ownerEmail of [email1, email2]) { + await waitForSharedFileOnViewers( + participantDataDirs, + ownerEmail, + flowName, + runId, + 6, + 'secure_aggregate', + 'aggregated_counts.json', + [email1, email2, email3], + ) + } + + // Verify secure share permissions include all participants on each client output. + for (const ownerEmail of [email1, email2]) { + const ownerRunDir = getSharedRunDir( + participantDataDirs.get(ownerEmail)!, + ownerEmail, + flowName, + runId, + ) + const secureDir = findExistingSharedStepDir(ownerRunDir, 6, 'secure_aggregate') + expect(secureDir).toBeTruthy() + const syftPubPath = path.join(secureDir!, 'syft.pub.yaml') + expect(fs.existsSync(syftPubPath)).toBe(true) + const syftPub = fs.readFileSync(syftPubPath, 'utf8') + expect(syftPub).toContain(`- ${email1}`) + expect(syftPub).toContain(`- ${email2}`) + expect(syftPub).toContain(`- ${email3}`) + } + + for (const ownerEmail of [email1, email2]) { + for (const viewerEmail of [email1, email2, email3]) { + const viewerDataDir = participantDataDirs.get(viewerEmail)! 
+ const ownerRunDir = getSharedRunDir(viewerDataDir, ownerEmail, flowName, runId) + const secureDir = findExistingSharedStepDir(ownerRunDir, 6, 'secure_aggregate') + expect(secureDir).toBeTruthy() + const aggregatedPath = path.join(secureDir!, 'aggregated_counts.json') + expect(fs.existsSync(aggregatedPath)).toBe(true) + } + } + + log(logSocket, { + event: 'syqure-multiparty-allele-freq-complete', + runId, + }) + } finally { + if (page1) await page1.close().catch(() => {}) + if (page2) await page2.close().catch(() => {}) + if (page3) await page3.close().catch(() => {}) + + if (backend1) await backend1.close().catch(() => {}) + if (backend2) await backend2.close().catch(() => {}) + if (backend3) await backend3.close().catch(() => {}) + + if (logSocket && logSocket.readyState === WebSocket.OPEN) { + await new Promise((resolve) => { + logSocket!.once('close', () => resolve()) + logSocket!.close() + }) + } + } + }) +}) diff --git a/tests/ui/syqure-multiparty-flow.spec.ts b/tests/ui/syqure-multiparty-flow.spec.ts new file mode 100644 index 00000000..c1c9fdb3 --- /dev/null +++ b/tests/ui/syqure-multiparty-flow.spec.ts @@ -0,0 +1,1584 @@ +/** + * Syqure Multiparty Flow Test (Three Clients) + * Uses the same invitation system as --pipelines-multiparty-flow, but executes + * the real syqure flow from biovault/tests/scenarios/syqure-flow/flow.yaml. 
+ * + * Usage: + * ./test-scenario.sh --syqure-multiparty-flow --interactive + * + * @tag syqure-multiparty-flow + */ +import { expect, test, type Page } from './playwright-fixtures' +import WebSocket from 'ws' +import * as fs from 'node:fs' +import * as path from 'node:path' +import { setWsPort, completeOnboarding, ensureLogSocket, log } from './onboarding-helper.js' + +const TEST_TIMEOUT = 1_800_000 // 30 minutes (syqure runtime can take time) +const UI_TIMEOUT = 20_000 +const SYNC_INTERVAL = 1000 +const MESSAGE_TIMEOUT = 180_000 +const RUN_TIMEOUT_MS = Number.parseInt( + process.env.SYQURE_MULTIPARTY_RUN_TIMEOUT_MS || '1200000', + 10, +) +const SECURE_ONLY_MODE = ['1', 'true', 'yes'].includes( + String(process.env.SYQURE_MULTIPARTY_SECURE_ONLY || '').toLowerCase(), +) +const CLI_PARITY_MODE = ['1', 'true', 'yes'].includes( + String(process.env.SYQURE_MULTIPARTY_CLI_PARITY || '').toLowerCase(), +) + +test.describe.configure({ timeout: TEST_TIMEOUT }) + +interface Backend { + invoke: (cmd: string, args?: Record, timeoutMs?: number) => Promise + close: () => Promise +} + +async function connectBackend(port: number): Promise { + const socket = new WebSocket(`ws://localhost:${port}`) + await new Promise((resolve, reject) => { + const timeout = setTimeout( + () => reject(new Error(`WS connect timeout on port ${port}`)), + 10_000, + ) + socket.once('open', () => { + clearTimeout(timeout) + resolve() + }) + socket.once('error', (err) => { + clearTimeout(timeout) + reject(err) + }) + }) + + let nextId = 0 + const pending = new Map void; reject: (e: any) => void }>() + + socket.on('message', (data) => { + let parsed: any + try { + parsed = JSON.parse(data.toString()) + } catch { + return + } + const entry = pending.get(parsed?.id) + if (!entry) return + pending.delete(parsed.id) + if (parsed.error) entry.reject(new Error(parsed.error)) + else entry.resolve(parsed.result) + }) + + function invoke(cmd: string, args: Record = {}, timeoutMs = 30_000) { + const id = ++nextId + 
socket.send(JSON.stringify({ id, cmd, args })) + return new Promise((resolve, reject) => { + pending.set(id, { resolve, reject }) + setTimeout(() => { + if (!pending.has(id)) return + pending.delete(id) + reject(new Error(`WS invoke timeout: ${cmd}`)) + }, timeoutMs) + }) + } + + async function close() { + if (socket.readyState !== WebSocket.OPEN) return + await new Promise((resolve) => { + socket.once('close', () => resolve()) + socket.close() + }) + } + + return { invoke, close } +} + +function resolveDatasitesRoot(dataDir: string): string { + return path.basename(dataDir) === 'datasites' ? dataDir : path.join(dataDir, 'datasites') +} + +async function getSyftboxDataDir(backend: Backend): Promise { + const info = await backend.invoke('get_syftbox_config_info') + const dataDir = info?.data_dir + if (!dataDir || typeof dataDir !== 'string') { + throw new Error('WS bridge did not return a usable data_dir (get_syftbox_config_info)') + } + return dataDir +} + +function prepareSecureOnlyFixtures(sessionId: string): { countsPath: string; countPath: string } { + const fixturesDir = path.join(process.cwd(), 'artifacts', 'syqure-secure-only', sessionId) + fs.mkdirSync(fixturesDir, { recursive: true }) + + const counts = [3, 1, 0] + const countsPath = path.join(fixturesDir, 'counts_array.json') + const countPath = path.join(fixturesDir, 'count.txt') + fs.writeFileSync(countsPath, `${JSON.stringify(counts)}\n`, 'utf8') + fs.writeFileSync(countPath, `${counts.length}\n`, 'utf8') + + return { countsPath, countPath } +} + +function buildSecureOnlyFlowSpec( + datasites: [string, string, string], + countsPath: string, + countPath: string, + secureAggregateModulePath: string, +): Record { + return { + vars: { + flow_path: 'syft://{datasite.current}/shared/flows/{flow_name}', + run_path: '{vars.flow_path}/{run_id}', + step_path: '{vars.run_path}/{step.number}-{step.id}', + }, + coordination: { + url: '{vars.run_path}/_progress', + share_with: 'all', + }, + mpc: { + url: 
'{vars.run_path}/_mpc', + topology: 'mesh', + }, + inputs: { + datasites: { + default: datasites, + }, + }, + datasites: { + all: 'inputs.datasites', + groups: { + aggregator: { + include: ['{datasites[0]}'], + }, + clients: { + include: ['{datasites[1]}', '{datasites[2]}'], + }, + }, + }, + modules: { + secure_aggregate: { + source: { + kind: 'local', + path: secureAggregateModulePath, + }, + allow_dirty: true, + interface: { + inputs: [ + { name: 'counts', type: 'File' }, + { name: 'array_length', type: 'String' }, + ], + outputs: [{ name: 'aggregated_counts', type: 'File' }], + }, + assets: [{ path: 'smpc_aggregate.codon' }, { path: 'he_aggregate.codon' }], + }, + }, + steps: [ + { + id: 'secure_aggregate', + uses: 'secure_aggregate', + run: { + targets: 'all', + strategy: 'parallel', + }, + with: { + counts: { + value: countsPath, + only: 'clients', + }, + array_length: { + value: countPath, + }, + }, + share: { + result_shared: { + source: 'self.outputs.aggregated_counts', + url: '{vars.step_path}/aggregated.json', + permissions: { + read: ['{datasites[*]}'], + write: ['{datasite.current}'], + }, + }, + }, + }, + ], + } +} + +function applyCliParityModulePaths( + spec: Record, + sourceFlowPath: string, +): Record { + const cloned = JSON.parse(JSON.stringify(spec || {})) + const modulesRoot = path.join(path.dirname(sourceFlowPath), 'modules') + const modules = cloned?.modules + if (!modules || typeof modules !== 'object') return cloned + + for (const moduleDef of Object.values(modules as Record)) { + const source = moduleDef?.source + if (!source || source.kind !== 'local' || typeof source.path !== 'string') continue + + const modulePath = source.path.trim() + if (!modulePath) continue + if (path.isAbsolute(modulePath)) continue + + let relativePath = modulePath.replace(/^[.][\\/]/, '') + if (relativePath.startsWith('modules/')) { + relativePath = relativePath.slice('modules/'.length) + } + source.path = path.join(modulesRoot, relativePath) + } + + return 
cloned +} + +function didBundlePath(viewerDataDir: string, identity: string): string { + return path.join(resolveDatasitesRoot(viewerDataDir), identity, 'public', 'crypto', 'did.json') +} + +function normalizeMetadata(metadata: any): any { + if (!metadata) return null + if (typeof metadata === 'string') { + try { + return JSON.parse(metadata) + } catch { + return null + } + } + return metadata +} + +async function waitForThreadMessageMatching( + backend: Backend, + threadId: string, + predicate: (msg: any) => boolean, + label: string, + timeoutMs = MESSAGE_TIMEOUT, +): Promise { + const start = Date.now() + while (Date.now() - start < timeoutMs) { + try { + await backend.invoke('sync_messages_with_failures') + } catch { + // Ignore transient sync failures while polling. + } + const msgs = await backend.invoke('get_thread_messages', { threadId }).catch(() => []) + const found = Array.isArray(msgs) ? msgs.find((msg: any) => predicate(msg)) : null + if (found) return found + await backend.invoke('trigger_syftbox_sync').catch(() => {}) + await new Promise((r) => setTimeout(r, SYNC_INTERVAL)) + } + throw new Error(`Timed out waiting for thread message: ${label}`) +} + +async function runMultiRecipientCryptoSmoke( + participants: Array<{ email: string; backend: Backend }>, +): Promise { + const smokeTag = `crypto-smoke-${Date.now()}` + console.log(`--- Multi-recipient encryption smoke: ${smokeTag} ---`) + + const sendCases = [ + { from: participants[0], to: [participants[1], participants[2]] }, + { from: participants[1], to: [participants[0], participants[2]] }, + { from: participants[2], to: [participants[0], participants[1]] }, + ] + + for (const sendCase of sendCases) { + const recipientEmails = sendCase.to.map((entry) => entry.email) + const body = `[${smokeTag}] ${sendCase.from.email} -> ${recipientEmails.join(', ')}` + const sent = await sendCase.from.backend.invoke('send_message', { + request: { + recipients: recipientEmails, + subject: `Crypto smoke ${smokeTag}`, 
+ body, + metadata: { + crypto_smoke: { + tag: smokeTag, + sender: sendCase.from.email, + recipients: recipientEmails, + }, + }, + }, + }) + const threadId = sent?.thread_id + expect(typeof threadId).toBe('string') + console.log( + ` Sent smoke message: ${sendCase.from.email} -> ${recipientEmails.join(', ')} (thread ${threadId})`, + ) + + await waitForThreadMessageMatching( + sendCase.from.backend, + threadId, + (msg) => String(msg?.body || '').includes(body), + `sender sees smoke message (${sendCase.from.email})`, + ) + + for (const recipient of sendCase.to) { + const received = await waitForThreadMessageMatching( + recipient.backend, + threadId, + (msg) => String(msg?.body || '').includes(body), + `${recipient.email} receives/decrypts smoke message`, + ) + const metadata = normalizeMetadata(received?.metadata) + expect(metadata?.crypto_smoke?.tag).toBe(smokeTag) + } + } + + console.log('Multi-recipient encryption smoke passed for all sender/recipient pairs') +} + +async function waitForContactImport( + backend: Backend, + identity: string, + timeoutMs = 120_000, +): Promise { + const start = Date.now() + let lastError = '' + while (Date.now() - start < timeoutMs) { + try { + await backend.invoke('network_import_contact', { identity }) + return + } catch (error) { + lastError = String(error) + } + await backend.invoke('trigger_syftbox_sync').catch(() => {}) + await new Promise((r) => setTimeout(r, SYNC_INTERVAL)) + } + throw new Error(`Timed out waiting for DID/contact import for ${identity}: ${lastError}`) +} + +async function waitForDidBundleOnViewer( + label: string, + viewerBackend: Backend, + viewerDataDir: string, + identity: string, + allBackends: Backend[], + timeoutMs = 120_000, +): Promise { + const start = Date.now() + const didPath = didBundlePath(viewerDataDir, identity) + while (Date.now() - start < timeoutMs) { + await Promise.all( + allBackends.map((backend) => backend.invoke('trigger_syftbox_sync').catch(() => {})), + ) + if 
(fs.existsSync(didPath)) return + await viewerBackend.invoke('trigger_syftbox_sync').catch(() => {}) + await new Promise((r) => setTimeout(r, SYNC_INTERVAL)) + } + throw new Error(`Timed out waiting for DID bundle (${label}): ${didPath}`) +} + +async function clickMessagesTab(page: Page): Promise { + const navTab = page.locator('.nav-item[data-tab="messages"]').first() + if (await navTab.isVisible().catch(() => false)) { + await navTab.click() + return + } + await page.locator('button:has-text("Messages")').first().click() +} + +async function clickRunsTab(page: Page): Promise { + const navTab = page.locator('.nav-item[data-tab="runs"]').first() + if (await navTab.isVisible().catch(() => false)) { + await navTab.click() + return + } + await page.locator('button:has-text("Runs")').first().click() +} + +async function clickStepActionButton( + page: Page, + stepId: string, + buttonClass: string, + label: string, + timeoutMs = UI_TIMEOUT, +): Promise { + const startedAt = Date.now() + let lastError = '' + while (Date.now() - startedAt < timeoutMs) { + try { + await clickRunsTab(page) + const openAllBtn = page + .locator('.mp-progress-actions .mp-collapse-btn:has-text("Open All")') + .first() + if (await openAllBtn.isVisible().catch(() => false)) { + await openAllBtn.click().catch(() => {}) + } + + const step = page.locator(`.mp-step[data-step-id="${stepId}"]`).first() + await expect(step).toBeVisible({ timeout: 3_000 }) + const actionBtn = step.locator(`button.${buttonClass}`).first() + await expect(actionBtn).toBeVisible({ timeout: 3_000 }) + await expect(actionBtn).toBeEnabled({ timeout: 3_000 }) + await actionBtn.click() + console.log(`${label}: clicked ${buttonClass} for ${stepId}`) + return + } catch (error) { + lastError = String(error) + await page.waitForTimeout(1_000) + } + } + + throw new Error(`Timed out clicking ${buttonClass} for ${stepId} (${label}): ${lastError}`) +} + +async function waitForLocalStepStatus( + backend: Backend, + sessionId: string, + 
stepId: string, + expectedStatuses: string[], + label: string, + timeoutMs = RUN_TIMEOUT_MS, +): Promise { + const startedAt = Date.now() + let lastStatus = 'unknown' + let lastError = '' + while (Date.now() - startedAt < timeoutMs) { + try { + const state = await backend.invoke('get_multiparty_flow_state', { sessionId }, 120_000) + const step = (state?.steps || []).find((entry: any) => entry?.id === stepId) + const status = step?.status ? String(step.status) : '' + if (status) { + lastStatus = status + if (expectedStatuses.includes(status)) return + if (status === 'Failed') { + const stepLogs = await backend + .invoke('get_multiparty_step_logs', { sessionId, stepId, lines: 240 }) + .catch(() => '') + throw new Error( + `${label}: step "${stepId}" entered Failed state.\n${String(stepLogs || '')}`, + ) + } + } + } catch (error) { + lastError = String(error) + } + await backend.invoke('trigger_syftbox_sync').catch(() => {}) + await new Promise((r) => setTimeout(r, 1200)) + } + throw new Error( + `${label}: timed out waiting for step "${stepId}" statuses [${expectedStatuses.join(', ')}] (last=${lastStatus})` + + (lastError ? `\nLast error: ${lastError}` : ''), + ) +} + +async function waitForSessionRunId( + backend: Backend, + sessionId: string, + label: string, + timeoutMs = RUN_TIMEOUT_MS, +): Promise { + const startedAt = Date.now() + let lastRunId = 0 + let lastError = '' + while (Date.now() - startedAt < timeoutMs) { + try { + const state = await backend.invoke('get_multiparty_flow_state', { sessionId }, 120_000) + const runId = Number(state?.run_id || 0) + if (runId > 0) return runId + lastRunId = runId + } catch (error) { + lastError = String(error) + } + await backend.invoke('trigger_syftbox_sync').catch(() => {}) + await new Promise((r) => setTimeout(r, 1200)) + } + throw new Error( + `${label}: timed out waiting for multiparty run_id > 0 (last=${lastRunId})` + + (lastError ? 
`\nLast error: ${lastError}` : ''), + ) +} + +async function clickStepActionAndWait( + page: Page, + backend: Backend, + sessionId: string, + stepId: string, + buttonClass: string, + label: string, + expectedStatuses: string[], + timeoutMs = RUN_TIMEOUT_MS, +): Promise { + await clickStepActionButton(page, stepId, buttonClass, label, timeoutMs) + await waitForLocalStepStatus(backend, sessionId, stepId, expectedStatuses, label, timeoutMs) +} + +async function importAndJoinInvitation( + page: Page, + backend: Backend, + label: string, + flowName: string, +): Promise { + const start = Date.now() + while (Date.now() - start < MESSAGE_TIMEOUT) { + await clickMessagesTab(page) + await backend.invoke('sync_messages_with_failures').catch(() => {}) + await backend.invoke('trigger_syftbox_sync').catch(() => {}) + + const refreshBtn = page.locator('#refresh-messages-btn').first() + if (await refreshBtn.isVisible().catch(() => false)) { + await refreshBtn.click().catch(() => {}) + await page.waitForTimeout(500) + } + + const threadBySubject = page + .locator(`.message-thread-item:has-text("Multiparty Flow: ${flowName}")`) + .first() + if (await threadBySubject.isVisible().catch(() => false)) { + await threadBySubject.click() + } else { + const firstThread = page.locator('.message-thread-item').first() + if (await firstThread.isVisible().catch(() => false)) { + await firstThread.click() + } + } + + const invitationCard = page.locator('.flow-invitation-card').first() + if (await invitationCard.isVisible().catch(() => false)) { + const importBtn = invitationCard.locator( + '.flow-invitation-btn.import-btn, button:has-text("Import Flow")', + ) + const joinBtn = invitationCard.locator( + '.flow-invitation-btn.view-runs-btn, button:has-text("Join Flow"), button:has-text("View Flow")', + ) + + if (await importBtn.isVisible({ timeout: 1500 }).catch(() => false)) { + await importBtn.click() + await page.waitForTimeout(1200) + } + + if (await joinBtn.isVisible({ timeout: 1500 
}).catch(() => false)) { + const joinText = (await joinBtn.textContent().catch(() => '')) || '' + if (joinText.includes('View Flow')) { + console.log(`${label}: already joined`) + return + } + await expect(joinBtn).toBeEnabled({ timeout: UI_TIMEOUT }) + await joinBtn.click() + console.log(`${label}: joined invitation flow`) + return + } + } + + await page.waitForTimeout(SYNC_INTERVAL) + } + + throw new Error(`${label}: timed out waiting for flow invitation card`) +} + +async function waitForRunStatus( + backend: Backend, + runId: number, + expectedStatuses: string[], + timeoutMs = RUN_TIMEOUT_MS, + label = 'run', +): Promise { + const startTime = Date.now() + let lastStatus = 'unknown' + let lastPollError = '' + let consecutivePollErrors = 0 + while (Date.now() - startTime < timeoutMs) { + let runs: any[] = [] + try { + // get_flow_runs can be slow while Syqure compute is active; allow a longer WS timeout. + runs = await backend.invoke('get_flow_runs', {}, 120_000) + consecutivePollErrors = 0 + } catch (error) { + lastPollError = String(error) + consecutivePollErrors += 1 + if (consecutivePollErrors === 1 || consecutivePollErrors % 10 === 0) { + console.warn( + `${label}: get_flow_runs poll error (${consecutivePollErrors}): ${lastPollError}`, + ) + } + await backend.invoke('trigger_syftbox_sync').catch(() => {}) + await new Promise((r) => setTimeout(r, 2_000)) + continue + } + const run = (runs || []).find((r: any) => r.id === runId) + if (run?.status && run.status !== lastStatus) { + lastStatus = run.status + console.log(`${label}: run ${runId} status -> ${lastStatus}`) + } + if (run && expectedStatuses.includes(run.status)) { + return run + } + await backend.invoke('trigger_syftbox_sync').catch(() => {}) + await new Promise((r) => setTimeout(r, 2_000)) + } + let logTail = '' + try { + logTail = String((await backend.invoke('get_flow_run_logs_tail', { runId, lines: 200 })) || '') + } catch { + // Ignore diagnostic failures and surface the timeout. 
+ } + throw new Error( + `Timed out waiting for run ${runId} status: ${expectedStatuses.join(', ')} (last=${lastStatus})` + + (lastPollError ? `\nLast poll error: ${lastPollError}` : '') + + (logTail ? `\nLast log tail:\n${logTail}` : ''), + ) +} + +function collectMatchingFiles(rootDir: string, filename: string): string[] { + if (!rootDir || !fs.existsSync(rootDir)) return [] + const matches: string[] = [] + const stack = [rootDir] + while (stack.length > 0) { + const current = stack.pop()! + for (const entry of fs.readdirSync(current, { withFileTypes: true })) { + const fullPath = path.join(current, entry.name) + if (entry.isDirectory()) { + stack.push(fullPath) + } else if (entry.isFile() && entry.name === filename) { + matches.push(fullPath) + } + } + } + return matches +} + +function assertSharedRunDirExists(dataDir: string, ownerEmail: string, runId: string) { + const datasitesRoot = resolveDatasitesRoot(dataDir) + const runDir = path.join(datasitesRoot, ownerEmail, 'shared', 'flows', 'syqure-flow', runId) + expect(fs.existsSync(runDir)).toBe(true) + + const hasProgressDir = + fs.existsSync(path.join(runDir, '_progress')) || fs.existsSync(path.join(runDir, 'progress')) + expect(hasProgressDir).toBe(true) +} + +function getSharedRunDir( + dataDir: string, + ownerEmail: string, + flowName: string, + runId: string, +): string { + return path.join(resolveDatasitesRoot(dataDir), ownerEmail, 'shared', 'flows', flowName, runId) +} + +type MpcTcpMarker = { + from: string + to: string + port: number + ports: Record +} + +function readMpcTcpMarker(markerPath: string): MpcTcpMarker { + const raw = fs.readFileSync(markerPath, 'utf8').trim() + const parsed = JSON.parse(raw) + return { + from: String(parsed?.from || ''), + to: String(parsed?.to || ''), + port: Number(parsed?.port || 0), + ports: Object.fromEntries( + Object.entries(parsed?.ports || {}).map(([email, port]) => [String(email), Number(port)]), + ), + } +} + +function normalizePortMap(portMap: Record): string { 
+ return Object.entries(portMap) + .sort(([a], [b]) => a.localeCompare(b)) + .map(([email, port]) => `${email}:${port}`) + .join('|') +} + +function getExpectedMpcChannels(datasites: string[]) { + const channels: Array<{ + from: string + to: string + fromIndex: number + toIndex: number + channelId: string + }> = [] + for (let fromIndex = 0; fromIndex < datasites.length; fromIndex += 1) { + for (let toIndex = 0; toIndex < datasites.length; toIndex += 1) { + if (fromIndex === toIndex) continue + channels.push({ + from: datasites[fromIndex], + to: datasites[toIndex], + fromIndex, + toIndex, + channelId: `${fromIndex}_to_${toIndex}`, + }) + } + } + return channels +} + +function assertMpcTopology( + participantDataDirs: Map, + datasites: string[], + flowName: string, + runId: string, +): void { + const channels = getExpectedMpcChannels(datasites) + const pairMarkers = new Map() + + for (const ownerEmail of datasites) { + const ownerDataDir = participantDataDirs.get(ownerEmail) + expect(ownerDataDir, `missing data dir for owner ${ownerEmail}`).toBeTruthy() + const ownerMpcDir = path.join( + getSharedRunDir(ownerDataDir!, ownerEmail, flowName, runId), + '_mpc', + ) + expect(fs.existsSync(ownerMpcDir), `owner _mpc missing: ${ownerMpcDir}`).toBe(true) + expect( + fs.existsSync(path.join(ownerMpcDir, 'syft.pub.yaml')), + `owner _mpc/syft.pub.yaml missing: ${ownerMpcDir}`, + ).toBe(true) + } + + for (const channel of channels) { + const ownerDataDir = participantDataDirs.get(channel.from) + expect(ownerDataDir, `missing data dir for owner ${channel.from}`).toBeTruthy() + + const ownerMpcDir = path.join( + getSharedRunDir(ownerDataDir!, channel.from, flowName, runId), + '_mpc', + ) + const ownerChannelDir = path.join(ownerMpcDir, channel.channelId) + const ownerTcpPath = path.join(ownerChannelDir, 'stream.tcp') + const ownerAcceptPath = path.join(ownerChannelDir, 'stream.accept') + const ownerAclPath = path.join(ownerChannelDir, 'syft.pub.yaml') + + 
expect(fs.existsSync(ownerChannelDir), `owner channel dir missing: ${ownerChannelDir}`).toBe( + true, + ) + expect(fs.existsSync(ownerTcpPath), `owner stream.tcp missing: ${ownerTcpPath}`).toBe(true) + expect(fs.existsSync(ownerAcceptPath), `owner stream.accept missing: ${ownerAcceptPath}`).toBe( + true, + ) + expect( + fs.existsSync(ownerAclPath), + `owner channel syft.pub.yaml missing: ${ownerAclPath}`, + ).toBe(true) + + const acceptValue = fs.readFileSync(ownerAcceptPath, 'utf8').trim() + expect( + acceptValue === '1' || acceptValue === 'true', + `owner stream.accept invalid (${ownerAcceptPath}): ${acceptValue}`, + ).toBe(true) + + const marker = readMpcTcpMarker(ownerTcpPath) + expect(marker.from, `stream.tcp from mismatch for ${ownerTcpPath}`).toBe(channel.from) + expect(marker.to, `stream.tcp to mismatch for ${ownerTcpPath}`).toBe(channel.to) + expect(marker.port > 0, `stream.tcp port invalid for ${ownerTcpPath}: ${marker.port}`).toBe( + true, + ) + expect( + Number(marker.ports[channel.from]) > 0, + `stream.tcp ports missing sender (${channel.from}) for ${ownerTcpPath}`, + ).toBe(true) + expect( + Number(marker.ports[channel.to]) > 0, + `stream.tcp ports missing recipient (${channel.to}) for ${ownerTcpPath}`, + ).toBe(true) + + const pairKey = [channel.from, channel.to].sort().join('<->') + const existing = pairMarkers.get(pairKey) + if (existing) { + expect(marker.port, `pair port mismatch for ${pairKey}`).toBe(existing.port) + expect(normalizePortMap(marker.ports), `pair port-map mismatch for ${pairKey}`).toBe( + normalizePortMap(existing.ports), + ) + } else { + pairMarkers.set(pairKey, marker) + } + + for (const viewerEmail of datasites) { + const viewerDataDir = participantDataDirs.get(viewerEmail) + expect(viewerDataDir, `missing data dir for viewer ${viewerEmail}`).toBeTruthy() + const viewerMpcDir = path.join( + getSharedRunDir(viewerDataDir!, channel.from, flowName, runId), + '_mpc', + ) + const viewerChannelDir = path.join(viewerMpcDir, 
channel.channelId) + const viewerAclPath = path.join(viewerChannelDir, 'syft.pub.yaml') + + expect( + fs.existsSync(viewerChannelDir), + `viewer channel dir missing (${viewerEmail}): ${viewerChannelDir}`, + ).toBe(true) + expect( + fs.existsSync(viewerAclPath), + `viewer channel syft.pub.yaml missing (${viewerEmail}): ${viewerAclPath}`, + ).toBe(true) + } + } +} + +async function waitForMpcTopologyReady( + participantDataDirs: Map, + datasites: string[], + flowName: string, + runId: string, + timeoutMs = MESSAGE_TIMEOUT, +): Promise { + const startedAt = Date.now() + let lastError = '' + while (Date.now() - startedAt < timeoutMs) { + try { + assertMpcTopology(participantDataDirs, datasites, flowName, runId) + return + } catch (error) { + lastError = String(error) + } + await new Promise((resolve) => setTimeout(resolve, 1200)) + } + throw new Error( + `Timed out waiting for _mpc topology/port markers to be ready for run ${runId}` + + (lastError ? `\nLast topology error: ${lastError}` : ''), + ) +} + +function getSharedStepDirCandidates(runDir: string, stepNumber: number, stepId: string): string[] { + return [ + path.join(runDir, `${stepNumber}-${stepId}`), + path.join(runDir, `${String(stepNumber).padStart(2, '0')}-${stepId}`), + ] +} + +function findExistingSharedStepDir( + runDir: string, + stepNumber: number, + stepId: string, +): string | null { + for (const candidate of getSharedStepDirCandidates(runDir, stepNumber, stepId)) { + if (fs.existsSync(candidate)) return candidate + } + return null +} + +async function waitForCondition( + check: () => boolean, + label: string, + timeoutMs = MESSAGE_TIMEOUT, + pollMs = 1000, +): Promise { + const startedAt = Date.now() + while (Date.now() - startedAt < timeoutMs) { + if (check()) return + await new Promise((resolve) => setTimeout(resolve, pollMs)) + } + throw new Error(`Timed out waiting for condition: ${label}`) +} + +async function waitForSharedFileOnViewers( + participantDataDirs: Map, + ownerEmail: string, + 
flowName: string, + runId: string, + stepNumber: number, + stepId: string, + fileName: string, + requiredViewerEmails: string[], + timeoutMs = MESSAGE_TIMEOUT, +): Promise { + await waitForCondition( + () => + requiredViewerEmails.every((viewerEmail) => { + const viewerDataDir = participantDataDirs.get(viewerEmail) + if (!viewerDataDir) return false + const runDir = getSharedRunDir(viewerDataDir, ownerEmail, flowName, runId) + const stepDir = findExistingSharedStepDir(runDir, stepNumber, stepId) + if (!stepDir) return false + return fs.existsSync(path.join(stepDir, fileName)) + }), + `${ownerEmail}/${stepId}/${fileName} visible on ${requiredViewerEmails.join(', ')}`, + timeoutMs, + ) +} + +function findParticipantStepStatus( + allProgress: any[], + participantEmail: string, + stepId: string, +): string | null { + const participant = (allProgress || []).find((entry) => entry?.email === participantEmail) + if (!participant) return null + const step = (participant.steps || []).find((entry: any) => entry?.step_id === stepId) + return step?.status || null +} + +async function waitForProgressConvergence( + viewers: Array<{ label: string; backend: Backend }>, + sessionId: string, + expectedStatuses: Array<{ email: string; stepId: string; statuses: string[] }>, + timeoutMs = MESSAGE_TIMEOUT, +): Promise { + const startedAt = Date.now() + while (Date.now() - startedAt < timeoutMs) { + let allSatisfied = true + + for (const viewer of viewers) { + await viewer.backend.invoke('trigger_syftbox_sync').catch(() => {}) + const allProgress = await viewer.backend + .invoke('get_all_participant_progress', { sessionId }) + .catch(() => []) + for (const expected of expectedStatuses) { + const status = findParticipantStepStatus(allProgress, expected.email, expected.stepId) + if (!status || !expected.statuses.includes(status)) { + allSatisfied = false + break + } + } + if (!allSatisfied) break + } + + if (allSatisfied) return + await new Promise((resolve) => setTimeout(resolve, 1200)) + 
} + + throw new Error('Timed out waiting for cross-participant progress convergence') +} + +test.describe('Syqure flow via multiparty invitation system @syqure-multiparty-flow', () => { + test('three clients join via invitation card and execute real syqure flow', async ({ + browser, + }) => { + const wsPortBase = Number.parseInt(process.env.DEV_WS_BRIDGE_PORT_BASE || '3333', 10) + const wsPort1 = wsPortBase + const wsPort2 = wsPortBase + 1 + const wsPort3 = wsPortBase + 2 + + const email1 = process.env.CLIENT1_EMAIL || 'client1@sandbox.local' + const email2 = process.env.CLIENT2_EMAIL || 'client2@sandbox.local' + const email3 = process.env.AGG_EMAIL || 'aggregator@sandbox.local' + + const flowName = 'syqure-flow' + const sourceFlowPath = path.join( + process.cwd(), + 'biovault', + 'tests', + 'scenarios', + 'syqure-flow', + 'flow.yaml', + ) + expect(fs.existsSync(sourceFlowPath)).toBe(true) + const secureAggregateModulePath = CLI_PARITY_MODE + ? path.join(path.dirname(sourceFlowPath), 'modules', 'secure-aggregate') + : './modules/secure-aggregate' + if (CLI_PARITY_MODE) { + expect(fs.existsSync(secureAggregateModulePath)).toBe(true) + } + + let logSocket: WebSocket | null = null + let backend1: Backend | null = null + let backend2: Backend | null = null + let backend3: Backend | null = null + let page1: Page | null = null + let page2: Page | null = null + let page3: Page | null = null + + try { + logSocket = await ensureLogSocket() + log(logSocket, { + event: 'syqure-multiparty-flow-start', + email1, + email2, + email3, + flowName, + }) + + page1 = await browser.newPage() + page2 = await browser.newPage() + page3 = await browser.newPage() + + await setWsPort(page1, wsPort1) + await setWsPort(page2, wsPort2) + await setWsPort(page3, wsPort3) + + backend1 = await connectBackend(wsPort1) + backend2 = await connectBackend(wsPort2) + backend3 = await connectBackend(wsPort3) + + const uiBaseUrl = process.env.UI_BASE_URL || 'http://localhost:8082' + await 
page1.goto(uiBaseUrl) + await page2.goto(uiBaseUrl) + await page3.goto(uiBaseUrl) + + await completeOnboarding(page1, email1, logSocket) + await completeOnboarding(page2, email2, logSocket) + await completeOnboarding(page3, email3, logSocket) + + await backend1.invoke('get_dev_mode_info') + await backend2.invoke('get_dev_mode_info') + await backend3.invoke('get_dev_mode_info') + + const dataDir1 = await getSyftboxDataDir(backend1) + const dataDir2 = await getSyftboxDataDir(backend2) + const dataDir3 = await getSyftboxDataDir(backend3) + const participantDataDirs = new Map([ + [email1, dataDir1], + [email2, dataDir2], + [email3, dataDir3], + ]) + + const allBackends = [backend1, backend2, backend3] + await Promise.all([ + waitForDidBundleOnViewer(email1, backend1, dataDir1, email2, allBackends), + waitForDidBundleOnViewer(email1, backend1, dataDir1, email3, allBackends), + waitForDidBundleOnViewer(email2, backend2, dataDir2, email1, allBackends), + waitForDidBundleOnViewer(email2, backend2, dataDir2, email3, allBackends), + waitForDidBundleOnViewer(email3, backend3, dataDir3, email1, allBackends), + waitForDidBundleOnViewer(email3, backend3, dataDir3, email2, allBackends), + ]) + + // Pairwise contacts for encrypted invitation delivery. + // Wait/retry until DID bundles are visible across all three clients. + await Promise.all([ + waitForContactImport(backend1, email2), + waitForContactImport(backend1, email3), + waitForContactImport(backend2, email1), + waitForContactImport(backend2, email3), + waitForContactImport(backend3, email1), + waitForContactImport(backend3, email2), + ]) + + await runMultiRecipientCryptoSmoke([ + { email: email1, backend: backend1 }, + { email: email2, backend: backend2 }, + { email: email3, backend: backend3 }, + ]) + + // Match biovault/tests/scenarios/syqure-distributed.yaml behavior: + // each participant runs the same source flow folder with local modules. 
+ await Promise.all([ + backend1.invoke('import_flow', { + flowFile: sourceFlowPath, + overwrite: true, + }), + backend2.invoke('import_flow', { + flowFile: sourceFlowPath, + overwrite: true, + }), + backend3.invoke('import_flow', { + flowFile: sourceFlowPath, + overwrite: true, + }), + ]) + + const flowsAgg = await backend3.invoke('get_flows', {}) + const syqureFlowAgg = (flowsAgg || []).find((flow: any) => flow?.name === flowName) + expect(syqureFlowAgg).toBeTruthy() + expect(syqureFlowAgg?.spec).toBeTruthy() + + const invitationSeed = Date.now() + const datasites = [email3, email1, email2] + const secureStepNumber = SECURE_ONLY_MODE ? 1 : 5 + + const secureOnlyFixtures = SECURE_ONLY_MODE + ? prepareSecureOnlyFixtures(`seed-${invitationSeed}`) + : null + const resolvedSpec = SECURE_ONLY_MODE + ? buildSecureOnlyFlowSpec( + [email3, email1, email2], + secureOnlyFixtures!.countsPath, + secureOnlyFixtures!.countPath, + secureAggregateModulePath, + ) + : CLI_PARITY_MODE + ? applyCliParityModulePaths(syqureFlowAgg.spec, sourceFlowPath) + : syqureFlowAgg.spec + + const flowSpec = { + apiVersion: 'syftbox.openmined.org/v1alpha1', + kind: 'Flow', + metadata: { + name: flowName, + version: syqureFlowAgg?.version || '0.1.0', + }, + spec: resolvedSpec, + } + if (SECURE_ONLY_MODE) { + console.log( + `secure-only mode enabled: counts=${secureOnlyFixtures!.countsPath} count=${secureOnlyFixtures!.countPath}`, + ) + } + if (CLI_PARITY_MODE) { + console.log( + `cli-parity mode enabled: module paths resolved from ${path.dirname(sourceFlowPath)}`, + ) + } + // Bootstrap a group thread so the Propose Flow UI has all participants in context. 
+ const bootstrap = await backend3.invoke('send_message', { + request: { + recipients: [email1, email2], + body: `syqure bootstrap ${invitationSeed}`, + subject: 'Syqure multiparty bootstrap', + }, + }) + const threadId = bootstrap?.thread_id + expect(typeof threadId).toBe('string') + + async function navigateToMessagesAndFindThread(page: Page, label: string) { + await clickMessagesTab(page) + await page.waitForTimeout(400) + await backend1.invoke('trigger_syftbox_sync').catch(() => {}) + await backend2.invoke('trigger_syftbox_sync').catch(() => {}) + await backend3.invoke('trigger_syftbox_sync').catch(() => {}) + + const refreshBtn = page.locator('#refresh-messages-btn').first() + if (await refreshBtn.isVisible().catch(() => false)) { + await refreshBtn.click().catch(() => {}) + await page.waitForTimeout(800) + } + + const threadBySubject = page + .locator('.message-thread-item:has-text("Syqure multiparty bootstrap")') + .first() + if (await threadBySubject.isVisible().catch(() => false)) { + await threadBySubject.click() + } else { + const firstThread = page.locator('.message-thread-item').first() + await firstThread.waitFor({ timeout: UI_TIMEOUT }) + await firstThread.click() + } + console.log(`${label}: thread selected for UI invitation send`) + } + + await navigateToMessagesAndFindThread(page1, email1) + await navigateToMessagesAndFindThread(page2, email2) + await navigateToMessagesAndFindThread(page3, email3) + + const proposeBtn = page3.locator('#propose-flow-btn') + await proposeBtn.waitFor({ timeout: UI_TIMEOUT }) + await proposeBtn.click() + const proposeModal = page3.locator('#propose-flow-modal') + await proposeModal.waitFor({ timeout: UI_TIMEOUT }) + await page3.selectOption('#propose-flow-select', { label: flowName }) + await page3.waitForTimeout(500) + + const roleRows = page3.locator('#propose-flow-roles-list .propose-flow-role-row') + const roleCount = await roleRows.count() + expect(roleCount).toBeGreaterThanOrEqual(3) + const available = [email1, 
email2, email3] + const used = new Set() + for (let i = 0; i < roleCount; i += 1) { + const row = roleRows.nth(i) + const roleLabel = ( + (await row.locator('.propose-flow-role-label').textContent().catch(() => '')) || '' + ) + .toLowerCase() + .trim() + const select = row.locator('select') + let preferred = '' + if (roleLabel.includes('aggregator')) preferred = email3 + else if (roleLabel.includes('1')) preferred = email1 + else if (roleLabel.includes('2')) preferred = email2 + const selectedEmail = + (preferred && !used.has(preferred) ? preferred : '') || + available.find((candidate) => !used.has(candidate)) || + preferred || + email1 + await select.selectOption(selectedEmail) + used.add(selectedEmail) + } + + await page3 + .locator('#propose-flow-message') + .fill(`Join collaborative Syqure flow run ${invitationSeed}`) + const sendBtn = page3.locator('#propose-flow-send-btn') + await expect + .poll(async () => { + try { + return await sendBtn.isEnabled() + } catch { + return false + } + }, { timeout: UI_TIMEOUT }) + .toBe(true) + await sendBtn.click({ timeout: UI_TIMEOUT }) + try { + await expect(proposeModal).toBeHidden({ timeout: 6000 }) + } catch { + await page3.evaluate(() => window.proposeFlowModal?.sendInvitation?.()) + await expect(proposeModal).toBeHidden({ timeout: UI_TIMEOUT }) + } + + const invitationMsg = await waitForThreadMessageMatching( + backend1, + threadId, + (msg) => normalizeMetadata(msg?.metadata)?.flow_invitation?.flow_name === flowName, + 'syqure flow invitation from UI modal', + ) + const invitationMeta = normalizeMetadata(invitationMsg?.metadata)?.flow_invitation || {} + const sessionId = String(invitationMeta?.session_id || invitationMeta?.sessionId || '') + expect(sessionId).toBeTruthy() + // Keep runId aligned with multiparty session_id so shared _progress/step paths align. 
+ const runId = sessionId + + await importAndJoinInvitation(page1, backend1, email1, flowName) + await importAndJoinInvitation(page2, backend2, email2, flowName) + await importAndJoinInvitation(page3, backend3, email3, flowName) + + const [runId1, runId2, runId3] = await Promise.all([ + waitForSessionRunId(backend1, sessionId, email1, 90_000), + waitForSessionRunId(backend2, sessionId, email2, 90_000), + waitForSessionRunId(backend3, sessionId, email3, 90_000), + ]) + expect(runId1).toBeGreaterThan(0) + expect(runId2).toBeGreaterThan(0) + expect(runId3).toBeGreaterThan(0) + + const flows1 = await backend1.invoke('get_flows', {}) + const flows2 = await backend2.invoke('get_flows', {}) + const flows3 = await backend3.invoke('get_flows', {}) + const syqureFlow1 = (flows1 || []).find((flow: any) => flow?.name === flowName) + const syqureFlow2 = (flows2 || []).find((flow: any) => flow?.name === flowName) + const syqureFlow3 = (flows3 || []).find((flow: any) => flow?.name === flowName) + expect(syqureFlow1).toBeTruthy() + expect(syqureFlow2).toBeTruthy() + expect(syqureFlow3).toBeTruthy() + + // Drive execution through the same UI controls users use (Run/Share per participant window). + await Promise.all([clickRunsTab(page1), clickRunsTab(page2), clickRunsTab(page3)]) + + if (!SECURE_ONLY_MODE) { + // Stage 1: clients run + share gen_variants. 
+ await Promise.all([ + clickStepActionAndWait( + page1, + backend1, + sessionId, + 'gen_variants', + 'mp-run-btn', + email1, + ['Completed', 'Shared'], + 180_000, + ), + clickStepActionAndWait( + page2, + backend2, + sessionId, + 'gen_variants', + 'mp-run-btn', + email2, + ['Completed', 'Shared'], + 180_000, + ), + ]) + await Promise.all([ + clickStepActionAndWait( + page1, + backend1, + sessionId, + 'gen_variants', + 'mp-share-btn', + email1, + ['Shared'], + 180_000, + ), + clickStepActionAndWait( + page2, + backend2, + sessionId, + 'gen_variants', + 'mp-share-btn', + email2, + ['Shared'], + 180_000, + ), + ]) + + // Stage 2: aggregator run + share build_master. + await clickStepActionAndWait( + page3, + backend3, + sessionId, + 'build_master', + 'mp-run-btn', + email3, + ['Completed', 'Shared'], + 180_000, + ) + await clickStepActionAndWait( + page3, + backend3, + sessionId, + 'build_master', + 'mp-share-btn', + email3, + ['Shared'], + 180_000, + ) + + // Stage 3: clients run align_counts. + await Promise.all([ + clickStepActionAndWait( + page1, + backend1, + sessionId, + 'align_counts', + 'mp-run-btn', + email1, + ['Completed', 'Shared'], + 180_000, + ), + clickStepActionAndWait( + page2, + backend2, + sessionId, + 'align_counts', + 'mp-run-btn', + email2, + ['Completed', 'Shared'], + 180_000, + ), + ]) + + // Assert MPC channel directories/markers before secure_aggregate starts. + await waitForMpcTopologyReady(participantDataDirs, datasites, flowName, runId, 180_000) + } + + // Final stage: all parties run + share secure_aggregate. 
+ await Promise.all([
+ clickStepActionAndWait(
+ page1,
+ backend1,
+ sessionId,
+ 'secure_aggregate',
+ 'mp-run-btn',
+ email1,
+ ['Completed', 'Shared'],
+ RUN_TIMEOUT_MS,
+ ),
+ clickStepActionAndWait(
+ page2,
+ backend2,
+ sessionId,
+ 'secure_aggregate',
+ 'mp-run-btn',
+ email2,
+ ['Completed', 'Shared'],
+ RUN_TIMEOUT_MS,
+ ),
+ clickStepActionAndWait(
+ page3,
+ backend3,
+ sessionId,
+ 'secure_aggregate',
+ 'mp-run-btn',
+ email3,
+ ['Completed', 'Shared'],
+ RUN_TIMEOUT_MS,
+ ),
+ ])
+ await Promise.all([
+ clickStepActionAndWait(
+ page1,
+ backend1,
+ sessionId,
+ 'secure_aggregate',
+ 'mp-share-btn',
+ email1,
+ ['Shared'],
+ RUN_TIMEOUT_MS,
+ ),
+ clickStepActionAndWait(
+ page2,
+ backend2,
+ sessionId,
+ 'secure_aggregate',
+ 'mp-share-btn',
+ email2,
+ ['Shared'],
+ RUN_TIMEOUT_MS,
+ ),
+ clickStepActionAndWait(
+ page3,
+ backend3,
+ sessionId,
+ 'secure_aggregate',
+ 'mp-share-btn',
+ email3,
+ ['Shared'],
+ RUN_TIMEOUT_MS,
+ ),
+ ])
+
+ assertSharedRunDirExists(dataDir1, email1, runId)
+ assertSharedRunDirExists(dataDir2, email2, runId)
+ assertSharedRunDirExists(dataDir3, email3, runId)
+
+ // Verify each participant can observe converged step statuses from all parties.
+ const viewers = [
+ { label: email1, backend: backend1 },
+ { label: email2, backend: backend2 },
+ { label: email3, backend: backend3 },
+ ]
+ const expectedConvergence = SECURE_ONLY_MODE
+ ? [
+ { email: email1, stepId: 'secure_aggregate', statuses: ['Shared', 'Completed'] },
+ { email: email2, stepId: 'secure_aggregate', statuses: ['Shared', 'Completed'] },
+ { email: email3, stepId: 'secure_aggregate', statuses: ['Shared', 'Completed'] },
+ ]
+ : [
+ { email: email1, stepId: 'gen_variants', statuses: ['Shared', 'Completed'] },
+ { email: email2, stepId: 'gen_variants', statuses: ['Shared', 'Completed'] },
+ { email: email3, stepId: 'build_master', statuses: ['Shared', 'Completed'] },
+ { email: email1, stepId: 'align_counts', statuses: ['Completed', 'Shared'] },
+ { email: email2, stepId: 'align_counts', statuses: ['Completed', 'Shared'] },
+ { email: email1, stepId: 'secure_aggregate', statuses: ['Shared', 'Completed'] },
+ { email: email2, stepId: 'secure_aggregate', statuses: ['Shared', 'Completed'] },
+ { email: email3, stepId: 'secure_aggregate', statuses: ['Shared', 'Completed'] },
+ ]
+ await waitForProgressConvergence(viewers, sessionId, expectedConvergence)
+
+ const finalRun1 = await waitForRunStatus(
+ backend1,
+ runId1,
+ ['success', 'failed', 'error'],
+ RUN_TIMEOUT_MS,
+ email1,
+ )
+ const finalRun2 = await waitForRunStatus(
+ backend2,
+ runId2,
+ ['success', 'failed', 'error'],
+ RUN_TIMEOUT_MS,
+ email2,
+ )
+ const finalRun3 = await waitForRunStatus(
+ backend3,
+ runId3,
+ ['success', 'failed', 'error'],
+ RUN_TIMEOUT_MS,
+ email3,
+ )
+ console.log(
+ `Final run statuses: client1=${finalRun1.status}, client2=${finalRun2.status}, aggregator=${finalRun3.status}`,
+ )
+ expect(finalRun1.status).toBe('success')
+ expect(finalRun2.status).toBe('success')
+ expect(finalRun3.status).toBe('success')
+
+ // Verify _progress state/log files are synced and visible cross-datasite.
+ for (const ownerEmail of [email1, email2, email3]) {
+ for (const viewerEmail of [email1, email2, email3]) {
+ const viewerDataDir = participantDataDirs.get(viewerEmail)!
+ const runDir = getSharedRunDir(viewerDataDir, ownerEmail, flowName, runId) + await waitForCondition( + () => fs.existsSync(path.join(runDir, '_progress', 'state.json')), + `${viewerEmail} sees ${ownerEmail} _progress/state.json`, + ) + await waitForCondition( + () => + fs.existsSync(path.join(runDir, '_progress', 'log.jsonl')) || + fs.existsSync(path.join(runDir, '_progress', 'progress.json')), + `${viewerEmail} sees ${ownerEmail} _progress log`, + ) + } + } + + if (!SECURE_ONLY_MODE) { + // Stage 1 share: clients share rsids with aggregator. + await waitForSharedFileOnViewers( + participantDataDirs, + email1, + flowName, + runId, + 1, + 'gen_variants', + 'rsids.txt', + [email1, email3], + ) + await waitForSharedFileOnViewers( + participantDataDirs, + email2, + flowName, + runId, + 1, + 'gen_variants', + 'rsids.txt', + [email2, email3], + ) + + // Stage 2 share: aggregator shares master list with all. + await waitForSharedFileOnViewers( + participantDataDirs, + email3, + flowName, + runId, + 2, + 'build_master', + 'master_list.txt', + [email1, email2, email3], + ) + } + + // Final secure share: every participant shares secure_aggregate output back to all. + for (const ownerEmail of [email1, email2, email3]) { + await waitForSharedFileOnViewers( + participantDataDirs, + ownerEmail, + flowName, + runId, + secureStepNumber, + 'secure_aggregate', + 'aggregated_counts.json', + [email1, email2, email3], + ) + } + + // Verify secure share permissions include all participants on each owner's final output. 
+ for (const ownerEmail of [email1, email2, email3]) { + const ownerRunDir = getSharedRunDir( + participantDataDirs.get(ownerEmail)!, + ownerEmail, + flowName, + runId, + ) + const secureDir = findExistingSharedStepDir( + ownerRunDir, + secureStepNumber, + 'secure_aggregate', + ) + expect(secureDir).toBeTruthy() + const syftPubPath = path.join(secureDir!, 'syft.pub.yaml') + expect(fs.existsSync(syftPubPath)).toBe(true) + const syftPub = fs.readFileSync(syftPubPath, 'utf8') + expect(syftPub).toContain(`- ${email1}`) + expect(syftPub).toContain(`- ${email2}`) + expect(syftPub).toContain(`- ${email3}`) + } + + for (const ownerEmail of [email1, email2, email3]) { + for (const viewerEmail of [email1, email2, email3]) { + const viewerDataDir = participantDataDirs.get(viewerEmail)! + const ownerRunDir = getSharedRunDir(viewerDataDir, ownerEmail, flowName, runId) + const secureDir = findExistingSharedStepDir( + ownerRunDir, + secureStepNumber, + 'secure_aggregate', + ) + expect(secureDir).toBeTruthy() + const aggregatedPath = path.join(secureDir!, 'aggregated_counts.json') + expect(fs.existsSync(aggregatedPath)).toBe(true) + } + } + + log(logSocket, { + event: 'syqure-multiparty-flow-complete', + runId, + }) + } finally { + if (page1) await page1.close().catch(() => {}) + if (page2) await page2.close().catch(() => {}) + if (page3) await page3.close().catch(() => {}) + + if (backend1) await backend1.close().catch(() => {}) + if (backend2) await backend2.close().catch(() => {}) + if (backend3) await backend3.close().catch(() => {}) + + if (logSocket && logSocket.readyState === WebSocket.OPEN) { + await new Promise((resolve) => { + logSocket!.once('close', () => resolve()) + logSocket!.close() + }) + } + } + }) +})