diff --git a/src/ai/backend_onnx.c b/src/ai/backend_onnx.c index 48916e8a7722..45e1c14101db 100644 --- a/src/ai/backend_onnx.c +++ b/src/ai/backend_onnx.c @@ -78,40 +78,72 @@ static void _stderr_suppress_end(int saved) } #endif +// Load ORT API from a dynamically loaded module. Returns NULL on failure. +static const OrtApi *_ort_api_from_module(GModule *mod, const char *label) +{ + typedef const OrtApiBase *(*OrtGetApiBaseFn)(void); + OrtGetApiBaseFn get_api_base = NULL; + if(!g_module_symbol(mod, "OrtGetApiBase", (gpointer *)&get_api_base) || !get_api_base) + { + dt_print(DT_DEBUG_AI, "[darktable_ai] OrtGetApiBase symbol not found in '%s'", label); + return NULL; + } + dt_print(DT_DEBUG_AI, "[darktable_ai] loaded ORT %s from '%s'", + get_api_base()->GetVersionString(), label); + return get_api_base()->GetApi(ORT_API_VERSION); +} + static gpointer _init_ort_api(gpointer data) { (void)data; const OrtApi *api = NULL; -#ifdef ORT_LAZY_LOAD - // Ubuntu/Debian's system ORT links against libonnx, causing harmless but noisy - // "already registered" ONNX schema warnings when the library is first loaded. - // suppress them by loading ORT explicitly, with stderr temporarily redirected. - // G_MODULE_BIND_LAZY = RTLD_LAZY; default (no BIND_LOCAL) = RTLD_GLOBAL so - // provider symbols remain visible to the rest of the process via dlsym(NULL). - const int saved = _stderr_suppress_begin(); - // the handle is intentionally not stored: ORT must stay loaded for the process - // lifetime and g_module_close is never called, so the library stays resident. - GModule *ort_mod = g_module_open(ORT_LIBRARY_PATH, G_MODULE_BIND_LAZY); - _stderr_suppress_end(saved); + // DT_ORT_LIBRARY allows users to point to a GPU-enabled ORT build + // (e.g. CUDA or ROCm) without rebuilding darktable. On Linux this + // overrides the compile-time default; on Windows/macOS it dynamically + // loads a user-supplied library instead of the bundled DirectML/CoreML one. 
+ const char *ort_override = g_getenv("DT_ORT_LIBRARY"); - if(!ort_mod) + if(ort_override && ort_override[0]) { - dt_print(DT_DEBUG_AI, - "[darktable_ai] failed to load ORT library '%s': %s", - ORT_LIBRARY_PATH, g_module_error()); - return NULL; + GModule *ort_mod = g_module_open(ort_override, G_MODULE_BIND_LAZY); + if(!ort_mod) + { + dt_print(DT_DEBUG_AI, + "[darktable_ai] failed to load ORT library '%s': %s", + ort_override, g_module_error()); + return NULL; + } + api = _ort_api_from_module(ort_mod, ort_override); } - typedef const OrtApiBase *(*OrtGetApiBaseFn)(void); - OrtGetApiBaseFn get_api_base = NULL; - if(!g_module_symbol(ort_mod, "OrtGetApiBase", (gpointer *)&get_api_base) || !get_api_base) +#ifdef ORT_LAZY_LOAD + else { - dt_print(DT_DEBUG_AI, "[darktable_ai] OrtGetApiBase symbol not found"); - return NULL; + // Linux default: lazy-load the bundled or system ORT library. + // Suppress stderr during load - Ubuntu/Debian's system ORT links against + // libonnx, causing harmless "already registered" ONNX schema warnings. + const int saved = _stderr_suppress_begin(); + GModule *ort_mod = g_module_open(ORT_LIBRARY_PATH, G_MODULE_BIND_LAZY); + _stderr_suppress_end(saved); + + if(!ort_mod) + { + dt_print(DT_DEBUG_AI, + "[darktable_ai] failed to load ORT library '%s': %s", + ORT_LIBRARY_PATH, g_module_error()); + return NULL; + } + api = _ort_api_from_module(ort_mod, ORT_LIBRARY_PATH); } - api = get_api_base()->GetApi(ORT_API_VERSION); #else - api = OrtGetApiBase()->GetApi(ORT_API_VERSION); + else + { + // Windows/macOS: use the directly linked ORT library (DirectML/CoreML). 
+ const OrtApiBase *base = OrtGetApiBase(); + dt_print(DT_DEBUG_AI, "[darktable_ai] loaded ORT %s (bundled)", + base->GetVersionString()); + api = base->GetApi(ORT_API_VERSION); + } #endif if(!api) @@ -876,7 +908,7 @@ dt_ai_onnx_load_ext(const char *model_dir, const char *model_file, { ctx->dynamic_outputs = TRUE; dt_print(DT_DEBUG_AI, - "[darktable_ai] output[%zu] has dynamic dims — using ORT-allocated outputs", + "[darktable_ai] output[%zu] has dynamic dims - using ORT-allocated outputs", i); break; } diff --git a/tools/ai/README.md b/tools/ai/README.md new file mode 100644 index 000000000000..0215f2dbbc06 --- /dev/null +++ b/tools/ai/README.md @@ -0,0 +1,172 @@ +# GPU-Accelerated ONNX Runtime for darktable + +darktable bundles a CPU-only ONNX Runtime by default on Linux, and +bundles DirectML on Windows and CoreML on macOS. These scripts install a +GPU-enabled ORT build to accelerate AI features (denoise, upscale, +segmentation). + +## What's bundled by default + +| Platform | Bundled ORT | GPU support | |----------|------------|-------------| | Linux | CPU only | None – use scripts below | | Windows | DirectML | AMD, NVIDIA, Intel via DirectX 12 | | macOS | CoreML | Apple Silicon Neural Engine | + +## Installing GPU-accelerated ORT + +### NVIDIA (CUDA) – Linux & Windows + +**Requirements:** + +- NVIDIA GPU with compute capability 6.0+ (GeForce GTX 1000 "Pascal" or newer) +- NVIDIA driver 525 or later +- CUDA 12.x runtime – included with the driver on Windows; on Linux install + the CUDA toolkit (`nvidia-cuda-toolkit` on Ubuntu/Debian, `cuda` on Arch) +- cuDNN 9.x – download from https://developer.nvidia.com/cudnn-downloads or + install via package manager (`libcudnn9-cuda-12` on Ubuntu/Debian, `cudnn` + on Arch) + +Linux: +```bash +./tools/ai/install-ort-nvidia.sh +``` + +Windows (PowerShell): +```powershell +.\tools\ai\install-ort-nvidia.ps1 +``` + +Downloads a prebuilt ORT with CUDA EP from GitHub (~200 MB, ~30 sec). 
+On Windows, use this instead of the bundled DirectML for potentially +better NVIDIA performance. + +### AMD (MIGraphX) – Linux + +**Requirements:** + +- AMD GPU supported by ROCm: + - Consumer: Radeon RX 6000 series (RDNA2) or newer + - Data center: Instinct MI100 (CDNA) or newer +- ROCm 6.0 or later – install from AMD's repo: + https://rocm.docs.amd.com/projects/install-on-linux/en/latest/ + - Ubuntu/Debian: `sudo apt install rocm` + - Arch: `sudo pacman -S rocm-hip-sdk` + - Fedora: `sudo dnf install rocm` +- MIGraphX (included in ROCm, or install separately): + - Ubuntu/Debian: `sudo apt install migraphx migraphx-dev` + - Arch: `sudo pacman -S migraphx` +- For building from source: cmake 3.26+, gcc/g++, python3, git + +Prebuilt (fast, ~30 sec): +```bash +./tools/ai/install-ort-amd.sh +``` + +Build from source (fallback if prebuilt doesn't work, 10-20 min): +```bash +./tools/ai/install-ort-amd-build.sh +``` + +The prebuilt script downloads a wheel from AMD's package repository. The +build script compiles ORT against your installed ROCm headers and +libraries – use it if the prebuilt version has ABI compatibility issues. +Both auto-detect your ROCm version and select the matching ORT release: + +| ROCm | ORT version | +|------|-------------| +| 7.2 | 1.23.2 | +| 7.1 | 1.23.1 | +| 7.0 | 1.22.1 | +| 6.4 | 1.21.0 | +| 6.3 | 1.19.0 | +| 6.2 | 1.18.0 | +| 6.1 | 1.17.0 | +| 6.0 | 1.16.0 | + +### Intel (OpenVINO) – Linux + +**Requirements:** + +- Intel GPU or any x86_64 CPU: + - Integrated: HD Graphics, UHD Graphics, Iris Xe (Gen9+) + - Discrete: Intel Arc A-series (A770, A750, A580, etc.) 
+ - CPU-only mode works on any x86_64 processor (Intel or AMD) +- For GPU acceleration: Intel compute runtime with Level Zero + - Ubuntu/Debian: `sudo apt install intel-opencl-icd level-zero` + - Arch: `sudo pacman -S intel-compute-runtime level-zero-loader` + - For Arc GPUs: kernel 6.2 or later recommended +- pip3 (for downloading the wheel) +- OpenVINO runtime is bundled in the package – no separate install needed + +```bash +./tools/ai/install-ort-intel.sh +``` + +Downloads a prebuilt ORT with OpenVINO EP from PyPI (~60 MB, ~30 sec). +Includes all OpenVINO runtime libraries. + +## Using the custom ORT + +All scripts install to `~/.local/lib/onnxruntime-<provider>/` and print +the path to use. Set the `DT_ORT_LIBRARY` environment variable to point +darktable to the custom build: + +```bash +DT_ORT_LIBRARY=~/.local/lib/onnxruntime-cuda/libonnxruntime.so.1.24.4 darktable +``` + +Or add to `~/.bashrc` for persistence: +```bash +export DT_ORT_LIBRARY=~/.local/lib/onnxruntime-cuda/libonnxruntime.so.1.24.4 +``` + +On Windows (PowerShell): +```powershell +$env:DT_ORT_LIBRARY="C:\Users\you\AppData\Local\onnxruntime-cuda\onnxruntime.dll" +darktable +``` + +Or set permanently via System → Environment Variables. + +If `DT_ORT_LIBRARY` is not set, darktable uses the bundled ORT (CPU on +Linux, DirectML on Windows, CoreML on macOS). + +## Manual installation (without scripts) + +If you prefer to install manually or the scripts don't work for your setup: + +1. 
**Get an ORT shared library with your desired EP compiled in:** + - NVIDIA CUDA: download `onnxruntime-linux-x64-gpu-VERSION.tgz` (Linux) + or `onnxruntime-win-x64-gpu-VERSION.zip` (Windows) from + https://github.com/microsoft/onnxruntime/releases + - AMD MIGraphX: download `onnxruntime_rocm` wheel from + https://repo.radeon.com/rocm/manylinux/ (match your ROCm version) + or build from source: `./build.sh --config Release --build_shared_lib --use_migraphx --migraphx_home /opt/rocm` + - Intel OpenVINO: `pip download --no-deps onnxruntime-openvino` + +2. **Extract the shared library:** + - `.tgz`/`.zip`: extract `lib/libonnxruntime.so*` (or `lib/onnxruntime.dll`) + - `.whl`: rename to `.zip` and extract `onnxruntime/capi/libonnxruntime.so*` + and any `libonnxruntime_providers_*.so` files + +3. **Point darktable to it:** + ```bash + export DT_ORT_LIBRARY=/path/to/libonnxruntime.so.X.Y.Z + ``` + +## Verifying + +Run darktable with AI debug output to confirm which ORT is loaded: + +```bash +DT_ORT_LIBRARY=... darktable -d ai +``` + +Look for: +``` +[darktable_ai] loaded ORT 1.24.4 from '/home/user/.local/lib/onnxruntime-cuda/libonnxruntime.so.1.24.4' +``` + +Then check Preferences → Processing → AI execution provider to select +your GPU provider (CUDA, MIGraphX, OpenVINO). diff --git a/tools/ai/install-ort-amd-build.sh b/tools/ai/install-ort-amd-build.sh new file mode 100755 index 000000000000..8d6e7620ec23 --- /dev/null +++ b/tools/ai/install-ort-amd-build.sh @@ -0,0 +1,243 @@ +#!/bin/bash +# +# Build and install ONNX Runtime with MIGraphX ExecutionProvider +# for darktable AI acceleration on AMD GPUs. +# +# Unlike NVIDIA's prebuilt package, prebuilt MIGraphX wheels are tied to a +# specific ROCm version; this script builds ORT against your installed ROCm. 
+# +# Requirements: +# - AMD GPU supported by ROCm (RDNA2+, CDNA+) +# - ROCm 6.x+ with MIGraphX installed +# - Build tools: cmake 3.26+, gcc/g++, python3 +# +# Usage: install-ort-amd-build.sh [-y|--yes] [install-dir] + +set -euo pipefail + +YES=false +while [ $# -gt 0 ]; do + case "$1" in + -y|--yes) YES=true; shift ;; + *) break ;; + esac +done + +INSTALL_DIR="${1:-$HOME/.local/lib/onnxruntime-migraphx}" +ROCM_HOME="${ROCM_HOME:-/opt/rocm}" +BUILD_DIR="${TMPDIR:-/tmp}/ort-migraphx-build" + +# --- Platform checks (before user prompt) --- +if [ "$(uname -s)" != "Linux" ]; then + echo "Error: this script is for Linux only." >&2 + exit 1 +fi + +if [ "$(uname -m)" != "x86_64" ]; then + echo "Error: MIGraphX EP is only available for x86_64 (got $(uname -m))." >&2 + exit 1 +fi + +# --- Info & confirmation --- +echo "" +echo "ONNX Runtime - MIGraphX ExecutionProvider builder" +echo "===================================================" +echo "" +echo "This will build ONNX Runtime from source with AMD MIGraphX support" +echo "to enable GPU acceleration for darktable AI features" +echo "(denoise, upscale, segmentation)." +echo "" +echo "Unlike NVIDIA, there is no pre-built package - ORT must be compiled" +echo "against the ROCm version installed on your system." +echo "" +echo "Requirements:" +echo " - AMD GPU supported by ROCm (Radeon RX 6000+, Instinct MI100+)" +echo " - ROCm 6.x+ with MIGraphX" +echo " - cmake 3.26+, gcc/g++, python3, git" +echo "" +echo "Actions:" +echo " - Clone ORT source (~300 MB)" +echo " - Build with MIGraphX EP (10-20 min depending on hardware)" +echo " - Install shared libraries to: $INSTALL_DIR" +echo "" + +if [ "$YES" = false ]; then + read -rp "Continue? [y/N] " answer + if [[ ! "$answer" =~ ^[Yy] ]]; then + echo "Aborted." + exit 0 + fi + echo "" +fi + +# --- Helper: distro-specific install hint --- +distro_hint() { + local pkg_deb="$1" pkg_rpm="$2" pkg_arch="$3" pkg_suse="$4" fallback_url="$5" + if [ -f /etc/os-release ]; then + . 
/etc/os-release + case "$ID" in + ubuntu|debian|linuxmint|pop) + echo " Install on $NAME:" + echo " $pkg_deb" + ;; + fedora|rhel|centos|rocky|alma) + echo " Install on $NAME:" + echo " $pkg_rpm" + ;; + arch|manjaro|endeavouros) + echo " Install on $NAME:" + echo " $pkg_arch" + ;; + opensuse*|sles) + echo " Install on $NAME:" + echo " $pkg_suse" + ;; + *) + echo " Download from: $fallback_url" + return + ;; + esac + else + echo " Download from: $fallback_url" + fi +} + +# --- Check ROCm --- +if ! command -v rocminfo &>/dev/null || [ ! -d "$ROCM_HOME" ]; then + echo "Error: ROCm not found at $ROCM_HOME" + echo "" + distro_hint \ + "sudo apt install rocm (add AMD repo first: https://rocm.docs.amd.com/projects/install-on-linux/en/latest/)" \ + "sudo dnf install rocm (add AMD repo first: https://rocm.docs.amd.com/projects/install-on-linux/en/latest/)" \ + "sudo pacman -S rocm-hip-sdk" \ + "sudo zypper install rocm (add AMD repo first: https://rocm.docs.amd.com/projects/install-on-linux/en/latest/)" \ + "https://rocm.docs.amd.com/projects/install-on-linux/en/latest/" + echo "" + exit 1 +fi + +ROCM_VERSION="unknown" +if [ -f "$ROCM_HOME/.info/version" ]; then + ROCM_VERSION=$(cat "$ROCM_HOME/.info/version") +fi +echo "ROCm: $ROCM_VERSION ($ROCM_HOME)" + +# --- Select ORT version matching ROCm --- +ROCM_MAJOR_MINOR=$(echo "$ROCM_VERSION" | grep -oP '^\d+\.\d+') +case "$ROCM_MAJOR_MINOR" in + # 7.3+) ORT_VERSION="1.24.4" ;; # TODO: confirm when docs are updated + 7.2*) ORT_VERSION="1.23.2" ;; + 7.1*) ORT_VERSION="1.23.1" ;; + 7.0*) ORT_VERSION="1.22.1" ;; + 6.4*) ORT_VERSION="1.21.0" ;; + 6.3*) ORT_VERSION="1.19.0" ;; + 6.2*) ORT_VERSION="1.18.0" ;; + 6.1*) ORT_VERSION="1.17.0" ;; + 6.0*) ORT_VERSION="1.16.0" ;; + *) + echo "" + echo "Error: unsupported ROCm version $ROCM_VERSION" + echo " Supported: ROCm 6.0 - 7.2" + echo " Update ROCm or set ORT_VERSION manually and re-run." 
+ echo "" + exit 1 + ;; +esac +echo "ORT version: $ORT_VERSION (matched to ROCm $ROCM_MAJOR_MINOR)" + +# --- Check MIGraphX --- +if ! command -v migraphx-driver &>/dev/null \ + && [ ! -f "$ROCM_HOME/lib/libmigraphx.so" ] \ + && [ ! -f "$ROCM_HOME/lib64/libmigraphx.so" ]; then + echo "" + echo "Error: MIGraphX not found in $ROCM_HOME" + echo "" + distro_hint \ + "sudo apt install migraphx migraphx-dev" \ + "sudo dnf install migraphx migraphx-devel" \ + "sudo pacman -S migraphx" \ + "sudo zypper install migraphx migraphx-devel" \ + "https://rocm.docs.amd.com/projects/install-on-linux/en/latest/" + echo "" + exit 1 +fi +echo "MIGraphX: found" + +# --- Check build tools --- +MISSING="" +command -v cmake &>/dev/null || MISSING="$MISSING cmake" +command -v g++ &>/dev/null || MISSING="$MISSING g++" +command -v git &>/dev/null || MISSING="$MISSING git" +command -v python3 &>/dev/null || MISSING="$MISSING python3" + +if [ -n "$MISSING" ]; then + echo "" + echo "Error: missing build tools:$MISSING" + echo "" + distro_hint \ + "sudo apt install$MISSING" \ + "sudo dnf install$MISSING" \ + "sudo pacman -S$MISSING" \ + "sudo zypper install$MISSING" \ + "" + echo "" + exit 1 +fi + +CMAKE_VERSION=$(cmake --version | head -1 | grep -oP '[0-9]+\.[0-9]+') +echo "cmake: $CMAKE_VERSION" + +# --- Clone & build --- +echo "" +echo "Cloning ONNX Runtime v${ORT_VERSION}..." + +rm -rf "$BUILD_DIR" +mkdir -p "$BUILD_DIR" + +git clone --depth 1 --branch "v${ORT_VERSION}" \ + https://github.com/microsoft/onnxruntime.git "$BUILD_DIR/onnxruntime" + +cd "$BUILD_DIR/onnxruntime" + +# Patch Eigen hash mismatch - GitLab regenerates zip archives, breaking the +# hardcoded SHA1 in older ORT releases. Remove the URL_HASH line from +# eigen.cmake so FetchContent downloads without verification. 
+if [ -f "cmake/external/eigen.cmake" ]; then + sed -i '/URL_HASH/d' cmake/external/eigen.cmake + echo "Patched Eigen: removed URL_HASH check (GitLab zip archive mismatch)" +fi + +echo "" +echo "Building with MIGraphX EP (this will take 30-60 minutes)..." +echo "" + +./build.sh \ + --config Release \ + --build_shared_lib \ + --parallel \ + --skip_tests \ + --use_migraphx \ + --migraphx_home "$ROCM_HOME" + +# --- Install --- +BUILD_LIB_DIR="$BUILD_DIR/onnxruntime/build/Linux/Release" + +mkdir -p "$INSTALL_DIR" +cp "$BUILD_LIB_DIR/"libonnxruntime*.so* "$INSTALL_DIR/" + +# Clean up build tree (~2 GB) +rm -rf "$BUILD_DIR" + +ORT_SO=$(ls "$INSTALL_DIR/libonnxruntime.so."* 2>/dev/null | head -1) + +echo "" +echo "Done. Installed to: $INSTALL_DIR" +ls -lh "$INSTALL_DIR/"*.so* 2>/dev/null +echo "" +echo "To use with darktable:" +echo "" +echo " DT_ORT_LIBRARY=$ORT_SO darktable" +echo "" +echo "Or add to ~/.bashrc:" +echo "" +echo " export DT_ORT_LIBRARY=$ORT_SO" diff --git a/tools/ai/install-ort-amd.sh b/tools/ai/install-ort-amd.sh new file mode 100755 index 000000000000..511013543680 --- /dev/null +++ b/tools/ai/install-ort-amd.sh @@ -0,0 +1,174 @@ +#!/bin/bash +# +# Install ONNX Runtime with MIGraphX ExecutionProvider for darktable +# AI acceleration on AMD GPUs. +# +# This extracts native .so files from the onnxruntime_rocm wheel hosted +# on AMD's repo. Much faster than building from source (~30 sec vs +# 10-20 min) but requires the wheel's ROCm version to match your system. +# +# If this doesn't work, use install-ort-amd-build.sh to build from source. 
+# +# Requirements: +# - AMD GPU supported by ROCm (RDNA2+, CDNA+) +# - ROCm 6.x+ with MIGraphX installed +# - wget or curl +# +# Usage: install-ort-amd.sh [-y|--yes] [install-dir] + +set -euo pipefail + +YES=false +while [ $# -gt 0 ]; do + case "$1" in + -y|--yes) YES=true; shift ;; + *) break ;; + esac +done + +INSTALL_DIR="${1:-$HOME/.local/lib/onnxruntime-migraphx}" +ROCM_HOME="${ROCM_HOME:-/opt/rocm}" + +# --- Platform checks --- +if [ "$(uname -s)" != "Linux" ]; then + echo "Error: this script is for Linux only." >&2 + exit 1 +fi + +if [ "$(uname -m)" != "x86_64" ]; then + echo "Error: MIGraphX EP is only available for x86_64 (got $(uname -m))." >&2 + exit 1 +fi + +# --- Info & confirmation --- +echo "" +echo "ONNX Runtime - MIGraphX ExecutionProvider installer (prebuilt)" +echo "===============================================================" +echo "" +echo "This will download a prebuilt ONNX Runtime with AMD MIGraphX support" +echo "from AMD's package repository. Much faster than building from source." +echo "" +echo "Note: the prebuilt wheel is compiled against a specific ROCm version." +echo " If darktable fails to detect MIGraphX at runtime, use" +echo " install-ort-amd-build.sh to build from source instead." +echo "" +echo "Requirements:" +echo " - AMD GPU supported by ROCm (Radeon RX 6000+, Instinct MI100+)" +echo " - ROCm 6.x+ with MIGraphX" +echo " - wget or curl" +echo "" +echo "Actions:" +echo " - Download onnxruntime-migraphx wheel from AMD repo (~60 MB)" +echo " - Extract native shared libraries to: $INSTALL_DIR" +echo "" + +if [ "$YES" = false ]; then + read -rp "Continue? [y/N] " answer + if [[ ! "$answer" =~ ^[Yy] ]]; then + echo "Aborted." + exit 0 + fi + echo "" +fi + +# --- Check ROCm --- +if ! command -v rocminfo &>/dev/null || [ ! -d "$ROCM_HOME" ]; then + echo "Error: ROCm not found. 
Install ROCm first:" + echo " https://rocm.docs.amd.com/projects/install-on-linux/en/latest/" + echo "" + exit 1 +fi + +ROCM_VERSION="unknown" +if [ -f "$ROCM_HOME/.info/version" ]; then + ROCM_VERSION=$(cat "$ROCM_HOME/.info/version") +fi +echo "ROCm: $ROCM_VERSION ($ROCM_HOME)" + +# --- Select wheel repo matching ROCm --- +ROCM_MAJOR_MINOR=$(echo "$ROCM_VERSION" | grep -oP '^\d+\.\d+') +case "$ROCM_MAJOR_MINOR" in + 7.2*) ROCM_REPO="rocm-rel-7.2"; ORT_VERSION="1.23.2" ;; + 7.1*) ROCM_REPO="rocm-rel-7.1"; ORT_VERSION="1.23.1" ;; + 7.0*) ROCM_REPO="rocm-rel-7.0"; ORT_VERSION="1.22.1" ;; + 6.4*) ROCM_REPO="rocm-rel-6.4"; ORT_VERSION="1.21.0" ;; + 6.3*) ROCM_REPO="rocm-rel-6.3"; ORT_VERSION="1.19.0" ;; + 6.2*) ROCM_REPO="rocm-rel-6.2"; ORT_VERSION="1.18.0" ;; + 6.1*) ROCM_REPO="rocm-rel-6.1"; ORT_VERSION="1.17.0" ;; + 6.0*) ROCM_REPO="rocm-rel-6.0"; ORT_VERSION="1.16.0" ;; + *) + echo "Error: unsupported ROCm version $ROCM_VERSION" + echo " Try install-ort-amd-build.sh to build from source instead." + echo "" + exit 1 + ;; +esac +REPO_URL="https://repo.radeon.com/rocm/manylinux/$ROCM_REPO/" +echo "AMD repo: $REPO_URL" + + +# --- Download --- +TMPDIR=$(mktemp -d) +trap 'rm -rf "$TMPDIR"' EXIT + +# AMD publishes wheels as onnxruntime_rocm (not onnxruntime-migraphx) with +# platform tag linux_x86_64 (not manylinux). Download directly via URL. +WHEEL_NAME="onnxruntime_rocm-${ORT_VERSION}-cp312-cp312-linux_x86_64.whl" +WHEEL_URL="${REPO_URL}${WHEEL_NAME}" +WHEEL="$TMPDIR/$WHEEL_NAME" + +echo "Downloading $WHEEL_NAME..." +if command -v wget &>/dev/null; then + wget -q --show-progress -O "$WHEEL" "$WHEEL_URL" +elif command -v curl &>/dev/null; then + curl -fL --progress-bar -o "$WHEEL" "$WHEEL_URL" +else + echo "Error: neither wget nor curl found." >&2 + exit 1 +fi + +if [ ! -s "$WHEEL" ]; then + echo "Error: failed to download from $WHEEL_URL" + echo " Try install-ort-amd-build.sh to build from source instead." 
+ echo "" + exit 1 +fi + +echo "Wheel: $(basename "$WHEEL")" + +# --- Extract native libraries --- +echo "Extracting shared libraries..." +unzip -q -o "$WHEEL" -d "$TMPDIR/wheel" + +mkdir -p "$INSTALL_DIR" + +# Copy ORT core library +cp "$TMPDIR/wheel/onnxruntime/capi/"libonnxruntime.so* "$INSTALL_DIR/" 2>/dev/null || true +# Copy all provider libraries (MIGraphX, ROCm, shared) +cp "$TMPDIR/wheel/onnxruntime/capi/"libonnxruntime_providers_*.so "$INSTALL_DIR/" 2>/dev/null || true + +ORT_SO=$(ls "$INSTALL_DIR/libonnxruntime.so."* 2>/dev/null | head -1) +if [ -z "$ORT_SO" ]; then + ORT_SO=$(ls "$INSTALL_DIR/libonnxruntime.so" 2>/dev/null | head -1) +fi + +if [ -z "$ORT_SO" ]; then + echo "Error: libonnxruntime.so not found in wheel." + echo " Try install-ort-amd-build.sh to build from source instead." + exit 1 +fi + +echo "" +echo "Done. Installed to: $INSTALL_DIR" +ls -lh "$INSTALL_DIR/"*.so* 2>/dev/null +echo "" +echo "To use with darktable:" +echo "" +echo " DT_ORT_LIBRARY=$ORT_SO darktable" +echo "" +echo "Or add to ~/.bashrc:" +echo "" +echo " export DT_ORT_LIBRARY=$ORT_SO" +echo "" +echo "If MIGraphX is not detected at runtime, build from source instead:" +echo " ./tools/ai/install-ort-amd-build.sh" diff --git a/tools/ai/install-ort-intel.sh b/tools/ai/install-ort-intel.sh new file mode 100755 index 000000000000..01435de589b7 --- /dev/null +++ b/tools/ai/install-ort-intel.sh @@ -0,0 +1,188 @@ +#!/bin/bash +# +# Install ONNX Runtime with OpenVINO ExecutionProvider for darktable AI +# acceleration on Intel GPUs (and CPUs). +# +# The onnxruntime-openvino pip wheel bundles OpenVINO runtime libraries, +# so no separate OpenVINO installation is needed. This script extracts +# the native shared libraries from the wheel - no Python required at runtime. 
+# +# Requirements: +# - Intel GPU (HD/UHD/Iris/Arc) or CPU +# - Level Zero runtime for GPU acceleration (optional, CPU works without) +# +# Usage: install-ort-intel.sh [-y|--yes] [install-dir] + +set -euo pipefail + +YES=false +while [ $# -gt 0 ]; do + case "$1" in + -y|--yes) YES=true; shift ;; + *) break ;; + esac +done + +ORT_OPENVINO_VERSION="1.24.1" +INSTALL_DIR="${1:-$HOME/.local/lib/onnxruntime-openvino}" + +# --- Platform checks --- +if [ "$(uname -s)" != "Linux" ]; then + echo "Error: this script is for Linux only." >&2 + exit 1 +fi + +ARCH=$(uname -m) +if [ "$ARCH" != "x86_64" ]; then + echo "Error: OpenVINO EP is only available for x86_64 (got $ARCH)." >&2 + exit 1 +fi + +# --- Info & confirmation --- +echo "" +echo "ONNX Runtime ${ORT_OPENVINO_VERSION} - OpenVINO ExecutionProvider installer" +echo "=========================================================================" +echo "" +echo "This will download and install ONNX Runtime with Intel OpenVINO support" +echo "to enable GPU/CPU acceleration for darktable AI features" +echo "(denoise, upscale, segmentation)." +echo "" +echo "OpenVINO runtime libraries are bundled - no separate install needed." +echo "" +echo "Requirements:" +echo " - Intel GPU (HD/UHD/Iris Xe/Arc) or any x86_64 CPU" +echo " - OpenCL runtime for GPU acceleration (optional, CPU works without)" +echo "" +echo "Actions:" +echo " - Download onnxruntime-openvino wheel from PyPI (~60 MB)" +echo " - Extract native shared libraries to: $INSTALL_DIR" +echo "" + +if [ "$YES" = false ]; then + read -rp "Continue? [y/N] " answer + if [[ ! "$answer" =~ ^[Yy] ]]; then + echo "Aborted." + exit 0 + fi + echo "" +fi + +# --- Helper: distro-specific install hint --- +distro_hint() { + local pkg_deb="$1" pkg_rpm="$2" pkg_arch="$3" pkg_suse="$4" fallback_url="$5" + if [ -f /etc/os-release ]; then + . 
/etc/os-release + case "$ID" in + ubuntu|debian|linuxmint|pop) + echo " Install on $NAME:" + echo " $pkg_deb" + ;; + fedora|rhel|centos|rocky|alma) + echo " Install on $NAME:" + echo " $pkg_rpm" + ;; + arch|manjaro|endeavouros) + echo " Install on $NAME:" + echo " $pkg_arch" + ;; + opensuse*|sles) + echo " Install on $NAME:" + echo " $pkg_suse" + ;; + *) + echo " Download from: $fallback_url" + return + ;; + esac + else + echo " Download from: $fallback_url" + fi +} + +# --- Check Intel GPU compute runtime (optional, for GPU) --- +if ! ldconfig -p 2>/dev/null | grep -q libze_loader; then + echo "Note: Intel Level Zero runtime not found. OpenVINO will use CPU only." + echo " For Intel GPU acceleration, install the compute runtime:" + echo "" + distro_hint \ + "sudo apt install intel-opencl-icd level-zero" \ + "sudo dnf install intel-opencl level-zero" \ + "sudo pacman -S intel-compute-runtime level-zero-loader" \ + "sudo zypper install intel-opencl level-zero" \ + "https://github.com/intel/compute-runtime/releases" + echo "" +fi + +# --- Determine wheel URL --- +# PyPI wheel naming: onnxruntime_openvino-VERSION-cpXX-cpXX-manylinux_2_28_x86_64.whl +# We pick cp312 as a common target - the native .so files are the same for all Python versions. +WHEEL_NAME="onnxruntime_openvino-${ORT_OPENVINO_VERSION}-cp312-cp312-manylinux_2_28_x86_64.whl" +WHEEL_URL="https://files.pythonhosted.org/packages/cp312/${WHEEL_NAME}" + +# PyPI doesn't have stable direct URLs - use pip download to get the wheel. +if ! command -v pip3 &>/dev/null && ! command -v pip &>/dev/null; then + echo "Error: pip is required to download the wheel." 
+ echo "" + distro_hint \ + "sudo apt install python3-pip" \ + "sudo dnf install python3-pip" \ + "sudo pacman -S python-pip" \ + "sudo zypper install python3-pip" \ + "" + echo "" + exit 1 +fi + +PIP=$(command -v pip3 || command -v pip) + +# --- Download --- +TMPDIR=$(mktemp -d) +trap 'rm -rf "$TMPDIR"' EXIT + +echo "Downloading onnxruntime-openvino ${ORT_OPENVINO_VERSION}..." +$PIP download \ + --no-deps \ + --only-binary=:all: \ + --platform manylinux_2_28_x86_64 \ + --python-version 312 \ + --dest "$TMPDIR" \ + "onnxruntime-openvino==${ORT_OPENVINO_VERSION}" 2>&1 | tail -3 + +WHEEL=$(ls "$TMPDIR"/*.whl 2>/dev/null | head -1) +if [ -z "$WHEEL" ]; then + echo "Error: failed to download wheel." >&2 + exit 1 +fi + +# --- Extract native libraries --- +echo "Extracting shared libraries..." +unzip -q -o "$WHEEL" -d "$TMPDIR/wheel" + +mkdir -p "$INSTALL_DIR" + +# Copy ORT core library +cp "$TMPDIR/wheel/onnxruntime/capi/"libonnxruntime.so* "$INSTALL_DIR/" 2>/dev/null || true +# Copy OpenVINO provider library +cp "$TMPDIR/wheel/onnxruntime/capi/"libonnxruntime_providers_openvino.so "$INSTALL_DIR/" 2>/dev/null || true +# Copy bundled OpenVINO runtime libraries +cp "$TMPDIR/wheel/onnxruntime/capi/"libopenvino*.so* "$INSTALL_DIR/" 2>/dev/null || true +# Copy any other dependency .so files +cp "$TMPDIR/wheel/onnxruntime/capi/"libtbb*.so* "$INSTALL_DIR/" 2>/dev/null || true + +ORT_SO=$(ls "$INSTALL_DIR/libonnxruntime.so."* 2>/dev/null | head -1) +if [ -z "$ORT_SO" ]; then + # Some wheels use plain libonnxruntime.so without version suffix + ORT_SO=$(ls "$INSTALL_DIR/libonnxruntime.so" 2>/dev/null | head -1) +fi + +echo "" +echo "Done. 
Installed to: $INSTALL_DIR"
+ls -lh "$INSTALL_DIR/"*.so* 2>/dev/null
+echo ""
+echo "To use with darktable:"
+echo ""
+echo "  DT_ORT_LIBRARY=$ORT_SO darktable"
+echo ""
+echo "Or add to ~/.bashrc:"
+echo ""
+echo "  export DT_ORT_LIBRARY=$ORT_SO"
diff --git a/tools/ai/install-ort-nvidia.ps1 b/tools/ai/install-ort-nvidia.ps1
new file mode 100644
index 000000000000..de415d0ca414
--- /dev/null
+++ b/tools/ai/install-ort-nvidia.ps1
@@ -0,0 +1,148 @@
+#
+# Install ONNX Runtime with CUDA ExecutionProvider for darktable AI acceleration.
+#
+# Requirements:
+#   - NVIDIA GPU with CUDA compute capability 6.0+
+#   - CUDA 12.x runtime (driver 525+)
+#   - cuDNN 9.x
+#
+# Usage: .\install-ort-nvidia.ps1 [-Yes] [-InstallDir <dir>]
+
+param(
+    [switch]$Yes,
+    [string]$InstallDir = "$env:LOCALAPPDATA\onnxruntime-cuda"
+)
+
+$ErrorActionPreference = "Stop"
+
+$OrtVersion = "1.24.4"
+$Package = "onnxruntime-win-x64-gpu-$OrtVersion"
+$Url = "https://github.com/microsoft/onnxruntime/releases/download/v$OrtVersion/$Package.zip"
+
+# --- Info & confirmation ---
+Write-Host ""
+Write-Host "ONNX Runtime $OrtVersion - CUDA ExecutionProvider installer"
+Write-Host "================================================================"
+Write-Host ""
+Write-Host "This will download and install a GPU-accelerated ONNX Runtime build"
+Write-Host "to enable NVIDIA CUDA acceleration for darktable AI features"
+Write-Host "(denoise, upscale, segmentation)."
+Write-Host ""
+Write-Host "Requirements:"
+Write-Host "  - NVIDIA GPU with compute capability 6.0+ (Pascal or newer)"
+Write-Host "  - NVIDIA driver 525+ with CUDA 12.x support"
+Write-Host "  - cuDNN 9.x (download from https://developer.nvidia.com/cudnn-downloads)"
+Write-Host ""
+Write-Host "Actions:"
+Write-Host "  - Download prebuilt package from GitHub (~200 MB)"
+Write-Host "    $Url"
+Write-Host "  - Install to: $InstallDir"
+Write-Host ""
+
+if (-not $Yes) {
+    $answer = Read-Host "Continue? [y/N]"
+    if ($answer -notmatch '^[Yy]') {
+        Write-Host "Aborted."
+        exit 0
+    }
+    Write-Host ""
+}
+
+# --- Check NVIDIA driver ---
+$nvidiaSmi = Get-Command nvidia-smi -ErrorAction SilentlyContinue
+if (-not $nvidiaSmi) {
+    Write-Host "Warning: nvidia-smi not found - NVIDIA driver may not be installed."
+    Write-Host ""
+    Write-Host "  Download NVIDIA driver from: https://www.nvidia.com/drivers"
+    Write-Host "  A reboot is typically required after driver installation."
+    Write-Host "  Re-run this script afterwards."
+    Write-Host ""
+    exit 1
+}
+
+$driverVersion = (nvidia-smi --query-gpu=driver_version --format=csv,noheader 2>$null | Select-Object -First 1).Trim()
+Write-Host "NVIDIA driver: $driverVersion"
+
+# --- Check CUDA toolkit ---
+$nvcc = Get-Command nvcc -ErrorAction SilentlyContinue
+if ($nvcc) {
+    $cudaVersion = (nvcc --version | Select-String 'release (\d+\.\d+)').Matches.Groups[1].Value
+    Write-Host "CUDA toolkit: $cudaVersion"
+} else {
+    Write-Host ""
+    Write-Host "Note: CUDA toolkit not found. ORT bundles its own CUDA runtime libraries,"
+    Write-Host "      but installing the toolkit ensures full compatibility."
+    Write-Host ""
+    Write-Host "  Download from: https://developer.nvidia.com/cuda-downloads"
+    Write-Host ""
+}
+
+# --- Check cuDNN ---
+$cudnnFound = $false
+$cudnnPaths = @(
+    "$env:CUDA_PATH\bin\cudnn*.dll",
+    "$env:ProgramFiles\NVIDIA\CUDNN\*\bin\cudnn*.dll"
+)
+foreach ($pattern in $cudnnPaths) {
+    if (Test-Path $pattern) {
+        $cudnnDll = (Get-Item $pattern | Select-Object -First 1).FullName
+        Write-Host "cuDNN: $cudnnDll"
+        $cudnnFound = $true
+        break
+    }
+}
+if (-not $cudnnFound) {
+    Write-Host ""
+    Write-Host "Warning: cuDNN not found. CUDA EP requires cuDNN 9.x."
+    Write-Host ""
+    Write-Host "  Download from: https://developer.nvidia.com/cudnn-downloads"
+    Write-Host ""
+    Write-Host "  You can install cuDNN after this script finishes - darktable will"
+    Write-Host "  detect it at startup."
+    Write-Host ""
+}
+
+# --- Download ---
+$tmpDir = Join-Path $env:TEMP "ort-cuda-install"
+if (Test-Path $tmpDir) { Remove-Item -Recurse -Force $tmpDir }
+New-Item -ItemType Directory -Path $tmpDir | Out-Null
+
+$zipPath = Join-Path $tmpDir "ort-gpu.zip"
+
+Write-Host "Downloading ORT $OrtVersion with CUDA EP..."
+try {
+    $ProgressPreference = 'SilentlyContinue'
+    Invoke-WebRequest -Uri $Url -OutFile $zipPath -UseBasicParsing
+} catch {
+    Write-Host "Error: download failed from $Url" -ForegroundColor Red
+    Write-Host $_.Exception.Message
+    exit 1
+}
+
+# --- Install ---
+Write-Host "Extracting..."
+Expand-Archive -Path $zipPath -DestinationPath $tmpDir -Force
+
+if (-not (Test-Path $InstallDir)) {
+    New-Item -ItemType Directory -Path $InstallDir | Out-Null
+}
+
+# NOTE: nested Join-Path - the 3-argument form (-AdditionalChildPath) requires
+# PowerShell 6+; Windows PowerShell 5.1 only accepts -Path and -ChildPath.
+$srcLib = Join-Path (Join-Path $tmpDir $Package) "lib"
+Copy-Item "$srcLib\*.dll" -Destination $InstallDir -Force
+
+# Clean up
+Remove-Item -Recurse -Force $tmpDir
+
+$ortDll = Get-Item "$InstallDir\onnxruntime.dll" -ErrorAction SilentlyContinue
+
+Write-Host ""
+Write-Host "Done. Installed to: $InstallDir"
+Get-ChildItem "$InstallDir\*.dll" | Format-Table Name, Length -AutoSize
+Write-Host ""
+Write-Host "To use with darktable:"
+Write-Host ""
+Write-Host "  `$env:DT_ORT_LIBRARY=`"$($ortDll.FullName)`"; darktable"
+Write-Host ""
+Write-Host "Or set it permanently:"
+Write-Host ""
+Write-Host "  [Environment]::SetEnvironmentVariable('DT_ORT_LIBRARY', '$($ortDll.FullName)', 'User')"
diff --git a/tools/ai/install-ort-nvidia.sh b/tools/ai/install-ort-nvidia.sh
new file mode 100755
index 000000000000..c86e83f50e76
--- /dev/null
+++ b/tools/ai/install-ort-nvidia.sh
@@ -0,0 +1,200 @@
+#!/bin/bash
+#
+# Install ONNX Runtime with CUDA ExecutionProvider for darktable AI acceleration.
+#
+# Requirements:
+#   - NVIDIA GPU with CUDA compute capability 6.0+
+#   - CUDA 12.x runtime (driver 525+)
+#   - cuDNN 9.x
+#
+# The prebuilt ORT GPU package from GitHub includes the CUDA and cuDNN
+# execution provider libraries. darktable probes for CUDA EP at runtime
+# via dlsym - no rebuild required.
+#
+# Usage: install-ort-nvidia.sh [-y|--yes] [install-dir]
+
+set -euo pipefail
+
+YES=false
+while [ $# -gt 0 ]; do
+  case "$1" in
+    -y|--yes) YES=true; shift ;;
+    *) break ;;
+  esac
+done
+
+ORT_VERSION="1.24.4"
+INSTALL_DIR="${1:-$HOME/.local/lib/onnxruntime-cuda}"
+PACKAGE="onnxruntime-linux-x64-gpu-${ORT_VERSION}"
+URL="https://github.com/microsoft/onnxruntime/releases/download/v${ORT_VERSION}/${PACKAGE}.tgz"
+
+# --- Platform checks (before user prompt) ---
+if [ "$(uname -s)" != "Linux" ]; then
+  echo "Error: this script is for Linux only." >&2
+  exit 1
+fi
+
+if [ "$(uname -m)" != "x86_64" ]; then
+  echo "Error: CUDA EP is only available for x86_64 (got $(uname -m))." >&2
+  exit 1
+fi
+
+# --- Info & confirmation ---
+echo ""
+echo "ONNX Runtime ${ORT_VERSION} - CUDA ExecutionProvider installer"
+echo "================================================================"
+echo ""
+echo "This will download and install a GPU-accelerated ONNX Runtime build"
+echo "to enable NVIDIA CUDA acceleration for darktable AI features"
+echo "(denoise, upscale, segmentation)."
+echo ""
+echo "Requirements:"
+echo "  - NVIDIA GPU with compute capability 6.0+ (Pascal or newer)"
+echo "  - NVIDIA driver 525+ with CUDA 12.x support"
+echo "  - cuDNN 9.x (install via your distro packages or NVIDIA repos)"
+echo ""
+echo "Actions:"
+echo "  - Download prebuilt package from GitHub (~200 MB)"
+echo "    $URL"
+echo "  - Install shared libraries to: $INSTALL_DIR"
+echo ""
+
+if [ "$YES" = false ]; then
+  read -rp "Continue? [y/N] " answer
+  if [[ ! "$answer" =~ ^[Yy] ]]; then
+    echo "Aborted."
+    exit 0
+  fi
+  echo ""
+fi
+
+# --- Helper: distro-specific install hint ---
+distro_hint() {
+  local pkg_deb="$1" pkg_rpm="$2" pkg_arch="$3" pkg_suse="$4" fallback_url="$5"
+  if [ -f /etc/os-release ]; then
+    . /etc/os-release
+    case "$ID" in
+      ubuntu|debian|linuxmint|pop)
+        echo "  Install on $NAME:"
+        echo "    $pkg_deb"
+        ;;
+      fedora|rhel|centos|rocky|alma)
+        echo "  Install on $NAME:"
+        echo "    $pkg_rpm"
+        ;;
+      arch|manjaro|endeavouros)
+        echo "  Install on $NAME:"
+        echo "    $pkg_arch"
+        ;;
+      opensuse*|sles)
+        echo "  Install on $NAME:"
+        echo "    $pkg_suse"
+        ;;
+      *)
+        echo "  Download from: $fallback_url"
+        return
+        ;;
+    esac
+  else
+    echo "  Download from: $fallback_url"
+  fi
+}
+
+# --- Check NVIDIA driver ---
+if ! command -v nvidia-smi &>/dev/null; then
+  echo "Warning: nvidia-smi not found - NVIDIA driver may not be installed."
+  echo ""
+  distro_hint \
+    "sudo apt install nvidia-driver-550" \
+    "sudo dnf install akmod-nvidia  (RPM Fusion required: https://rpmfusion.org/Configuration)" \
+    "sudo pacman -S nvidia" \
+    "sudo zypper install nvidia-driver-G06" \
+    "https://www.nvidia.com/drivers"
+  echo ""
+  echo "  A reboot is typically required after driver installation."
+  echo "  Re-run this script afterwards."
+  echo ""
+  exit 1
+fi
+
+DRIVER_VERSION=$(nvidia-smi --query-gpu=driver_version --format=csv,noheader 2>/dev/null | head -1)
+echo "NVIDIA driver: $DRIVER_VERSION"
+
+# --- Check CUDA toolkit ---
+if command -v nvcc &>/dev/null; then
+  # '|| true' guards set -e/pipefail: an unexpected 'nvcc --version' format
+  # (no grep match) must not silently abort the whole installer.
+  CUDA_VERSION=$(nvcc --version | grep -oP 'release \K[0-9]+\.[0-9]+' || true)
+  echo "CUDA toolkit: $CUDA_VERSION"
+else
+  echo ""
+  echo "Note: CUDA toolkit not found. ORT bundles its own CUDA runtime libraries,"
+  echo "      but installing the toolkit ensures full compatibility."
+  echo ""
+  distro_hint \
+    "sudo apt install nvidia-cuda-toolkit" \
+    "sudo dnf install cuda-toolkit  (NVIDIA repo: https://developer.nvidia.com/cuda-downloads)" \
+    "sudo pacman -S cuda" \
+    "sudo zypper install cuda-toolkit  (NVIDIA repo: https://developer.nvidia.com/cuda-downloads)" \
+    "https://developer.nvidia.com/cuda-downloads"
+  echo ""
+fi
+
+# --- Check cuDNN ---
+CUDNN_FOUND=false
+if ldconfig -p 2>/dev/null | grep -q libcudnn; then
+  CUDNN_FOUND=true
+  CUDNN_LIB=$(ldconfig -p 2>/dev/null | grep 'libcudnn.so' | head -1 | awk '{print $NF}')
+  echo "cuDNN: $CUDNN_LIB"
+fi
+
+if [ "$CUDNN_FOUND" = false ]; then
+  echo ""
+  echo "Warning: cuDNN not found. CUDA EP requires cuDNN 9.x to be installed."
+  echo ""
+  distro_hint \
+    "sudo apt install libcudnn9-cuda-12" \
+    "sudo dnf install libcudnn9-cuda-12" \
+    "sudo pacman -S cudnn" \
+    "sudo zypper install libcudnn9-cuda-12" \
+    "https://developer.nvidia.com/cudnn-downloads"
+  echo ""
+  echo "  If the package is not found, add the NVIDIA repo first:"
+  echo "    https://developer.nvidia.com/cudnn-downloads"
+  echo ""
+  echo "  You can install cuDNN after this script finishes - darktable will"
+  echo "  detect it at startup."
+  echo ""
+fi
+
+# --- Download ---
+TMPDIR=$(mktemp -d)
+trap 'rm -rf "$TMPDIR"' EXIT
+
+echo "Downloading ORT ${ORT_VERSION} with CUDA EP..."
+if command -v wget &>/dev/null; then
+  wget -q --show-progress -O "$TMPDIR/ort-gpu.tgz" "$URL"
+elif command -v curl &>/dev/null; then
+  curl -fL --progress-bar -o "$TMPDIR/ort-gpu.tgz" "$URL"
+else
+  echo "Error: neither wget nor curl found." >&2
+  exit 1
+fi
+
+# --- Install ---
+tar xzf "$TMPDIR/ort-gpu.tgz" -C "$TMPDIR"
+
+mkdir -p "$INSTALL_DIR"
+cp "$TMPDIR/${PACKAGE}/lib/"*.so* "$INSTALL_DIR/"
+
+ORT_SO=$(ls "$INSTALL_DIR/libonnxruntime.so."* 2>/dev/null | head -1)
+
+echo ""
+echo "Done. Installed to: $INSTALL_DIR"
+ls -lh "$INSTALL_DIR/"*.so* 2>/dev/null
+echo ""
+echo "To use with darktable:"
+echo ""
+echo "  DT_ORT_LIBRARY=$ORT_SO darktable"
+echo ""
+echo "Or add to ~/.bashrc:"
+echo ""
+echo "  export DT_ORT_LIBRARY=$ORT_SO"