#!/bin/bash
# LuminOS master build script (v6.1 - Smart Search).
# Must run as root: it debootstraps a chroot and builds an ISO.
set -e

echo "===== LUMINOS MASTER BUILD SCRIPT (v6.1 - Smart Search) ====="
# Diagnostics belong on stderr so they survive stdout redirection of build logs.
if [ "$(id -u)" -ne 0 ]; then echo "ERROR: This script must be run as root." >&2; exit 1; fi
66
# --- 1. Define Directories & Vars ---
# All build artifacts live under ${WORK_DIR}; models are staged in ai_build.
WORK_DIR="${BASE_DIR}/work"
CHROOT_DIR="${WORK_DIR}/chroot"
ISO_DIR="${WORK_DIR}/iso"
AI_BUILD_DIR="${WORK_DIR}/ai_build"
ISO_NAME="LuminOS-0.2.1-amd64.iso"
1715# --- 2. Clean Up ---
# Create the skeleton the later phases write into.
mkdir -p "${CHROOT_DIR}"
mkdir -p "${ISO_DIR}/live"
mkdir -p "${ISO_DIR}/boot/grub"
mkdir -p "${AI_BUILD_DIR}"

# --- 3. Install Dependencies ---
echo "--> Installing build dependencies..."
@@ -37,44 +34,82 @@ apt-get install -y debootstrap squashfs-tools xorriso grub-pc-bin grub-efi-amd64
3734
# --- 4. PREPARE AI (ON HOST) ---
# Stage the llama3 model store into ${AI_BUILD_DIR}/models so it can later be
# injected into the chroot. Strategy A reuses an existing host-side Ollama
# model store; Strategy B downloads the ollama binary and pulls the model.
echo "====================================================="
echo "PHASE 0: Preparing AI Models"
echo "====================================================="
TARGET_MODEL_DIR="${AI_BUILD_DIR}/models"
mkdir -p "${TARGET_MODEL_DIR}"

# Detect the invoking user's home — the script runs as root (often via sudo),
# but the models usually live under the real user's ~/.ollama.
REAL_USER="${SUDO_USER:-$USER}"
USER_HOME=$(getent passwd "$REAL_USER" | cut -d: -f6)

# Candidate locations where an existing Ollama install stores models.
POSSIBLE_LOCATIONS=(
  "${USER_HOME}/.ollama/models"
  "/usr/share/ollama/.ollama/models"
  "/var/lib/ollama/.ollama/models"
  "/root/.ollama/models"
)

MODEL_FOUND=false

# Strategy A: reuse an existing local model store.
for LOC in "${POSSIBLE_LOCATIONS[@]}"; do
  if [ -d "$LOC" ]; then
    SIZE_CHECK=$(du -s "$LOC" | cut -f1)
    # du reports KB; require > ~1 GB so an empty/stub store is not accepted.
    if [ "$SIZE_CHECK" -gt 1000000 ]; then
      echo "SUCCESS: Found models at $LOC! Copying..."
      cp -r "${LOC}/." "${TARGET_MODEL_DIR}/"
      MODEL_FOUND=true
      break
    fi
  fi
done

# Strategy B: download if not found (fallback).
if [ "$MODEL_FOUND" = false ]; then
  echo "--> Model not found locally. Attempting download..."

  echo "--> Downloading Ollama binary..."
  curl -fL "https://github.com/ollama/ollama/releases/download/v0.1.32/ollama-linux-amd64" -o "${AI_BUILD_DIR}/ollama"
  chmod +x "${AI_BUILD_DIR}/ollama"

  echo "--> Starting temporary Ollama server..."
  # Scope HOME to the ollama processes only (ollama writes to $HOME/.ollama).
  # Exporting HOME globally here would leak into every later build phase.
  HOME="${AI_BUILD_DIR}" "${AI_BUILD_DIR}/ollama" serve > "${AI_BUILD_DIR}/server.log" 2>&1 &
  OLLAMA_PID=$!
  echo "Waiting 10s for server..."
  sleep 10

  # Fail fast with the server log if the daemon died during startup.
  if ! kill -0 "${OLLAMA_PID}" 2>/dev/null; then
    echo "ERROR: Ollama server crashed immediately."
    echo "--- Server Log ---"
    cat "${AI_BUILD_DIR}/server.log"
    echo "------------------"
    exit 1
  fi

  echo "--> Pulling base model (llama3)..."
  HOME="${AI_BUILD_DIR}" "${AI_BUILD_DIR}/ollama" pull llama3

  echo "--> Stopping server..."
  kill "${OLLAMA_PID}" || true
  # Reap the server before copying so all model blobs are fully flushed;
  # copying while it is still shutting down can capture partial files.
  wait "${OLLAMA_PID}" 2>/dev/null || true

  # Move from the temporary HOME structure into our staging target.
  if [ -d "${AI_BUILD_DIR}/.ollama/models" ]; then
    cp -r "${AI_BUILD_DIR}/.ollama/models/." "${TARGET_MODEL_DIR}/"
  fi
fi

# Final verification: refuse to build an ISO with a missing/truncated model.
SIZE_CHECK=$(du -s "${TARGET_MODEL_DIR}" | cut -f1)
if [ "$SIZE_CHECK" -lt 1000000 ]; then
  echo "ERROR: Model preparation failed. Target directory is too small ($SIZE_CHECK KB)."
  exit 1
else
  echo "SUCCESS: AI Models prepared (${SIZE_CHECK} KB)."
fi
79114
80115
# Install branding assets into the chroot.
mkdir -p "${CHROOT_DIR}/usr/share/wallpapers/luminos"
cp "${BASE_DIR}/assets/"* "${CHROOT_DIR}/usr/share/wallpapers/luminos/"

echo "--> Injecting AI files into system..."
# The binary only exists if the Strategy B fallback ran; after a Strategy A
# model copy it may be absent, so fetch it here before installing it.
if [ ! -f "${AI_BUILD_DIR}/ollama" ]; then
  curl -fL "https://github.com/ollama/ollama/releases/download/v0.1.32/ollama-linux-amd64" -o "${AI_BUILD_DIR}/ollama"
  chmod +x "${AI_BUILD_DIR}/ollama"
fi
cp "${AI_BUILD_DIR}/ollama" "${CHROOT_DIR}/usr/local/bin/"

# Place the staged model store where the in-ISO ollama setup expects it
# (/usr/share/ollama/.ollama/models inside the chroot).
mkdir -p "${CHROOT_DIR}/usr/share/ollama/.ollama"
cp -r "${TARGET_MODEL_DIR}" "${CHROOT_DIR}/usr/share/ollama/.ollama/"
echo "--> AI Injection Complete."
117156
118157
0 commit comments