diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000000..8b0e0fdafb --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,9 @@ +{ + "name": "MFC Container", + "image": "sbryngelson/mfc:latest-cpu", + "workspaceFolder": "/opt/MFC", + "settings": { + "terminal.integrated.shell.linux": "/bin/bash", + "editor.formatOnSave": true + }, +} \ No newline at end of file diff --git a/.github/.dockerignore b/.github/.dockerignore new file mode 100644 index 0000000000..452cfea06a --- /dev/null +++ b/.github/.dockerignore @@ -0,0 +1,82 @@ +node_modules/ +package.json +yarn.lock + +.venv/ +.vscode/ +src/*/autogen/ + +*.swo +*.swp + +*:Zone.Identifier + +.nfs* + +__pycache__ + +*.egg-info + +.DS_Store + +# NVIDIA Nsight Compute +*.nsys-rep +*.sqlite + +docs/*/initial* +docs/*/result* +docs/documentation/*-example.png +docs/documentation/examples.md + +examples/*batch/*/ +examples/**/D/* +examples/**/p* +examples/**/D_* +examples/**/*.inf +examples/**/*.inp +examples/**/*.dat +examples/**/*.o* +examples/**/silo* +examples/**/restart_data* +examples/**/*.out +examples/**/binary +examples/**/fort.1 +examples/**/*.sh +examples/**/*.err +examples/**/viz/ +examples/*.jpg +examples/*.png +examples/*/workloads/ +examples/*/run-*/ +examples/*/logs/ +examples/**/*.f90 +!examples/3D_lag_bubbles_shbubcollapse/input/lag_bubbles.dat +!examples/3D_lag_bubbles_bubblescreen/input/lag_bubbles.dat +workloads/ + +benchmarks/*batch/*/ +benchmarks/*/D/* +benchmarks/*/p* +benchmarks/*/D_* +benchmarks/*/*.inf +benchmarks/*/*.inp +benchmarks/*/*.dat +benchmarks/*/*.o* +benchmarks/*/silo* +benchmarks/*/restart_data* +benchmarks/*/*.out +benchmarks/*/binary +benchmarks/*/fort.1 +benchmarks/*/*.sh +benchmarks/*/*.err +benchmarks/*/viz/ +benchmarks/*.jpg +benchmarks/*.png + +*.mod + +# Video Files +*.mp4 +*.mov +*.mkv +*.avi \ No newline at end of file diff --git a/.github/Dockerfile b/.github/Dockerfile new file mode 100644 index 0000000000..1c50b4c09e --- /dev/null +++ b/.github/Dockerfile @@ -0,0 +1,55 @@ +ARG BASE_IMAGE +FROM ${BASE_IMAGE} + +ARG TARGET +ARG CC_COMPILER +ARG CXX_COMPILER +ARG FC_COMPILER +ARG COMPILER_PATH +ARG COMPILER_LD_LIBRARY_PATH + +RUN apt-get update -y && \ + if [ "$TARGET" != "gpu" ]; then \ + apt-get install -y \ + build-essential git make cmake gcc g++ gfortran bc\ + python3 python3-venv python3-pip \ + openmpi-bin libopenmpi-dev libfftw3-dev \ + mpich libmpich-dev; \ + else \ + apt-get install -y \ + build-essential git make cmake bc\ + python3 python3-venv python3-pip \ + libfftw3-dev \ + openmpi-bin libopenmpi-dev; \ + fi && \ + rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +ENV OMPI_ALLOW_RUN_AS_ROOT=1 +ENV OMPI_ALLOW_RUN_AS_ROOT_CONFIRM=1 +ENV PATH="/opt/MFC:$PATH" + +COPY ../ /opt/MFC + +ENV CC=${CC_COMPILER} +ENV CXX=${CXX_COMPILER} +ENV FC=${FC_COMPILER} +ENV PATH="${COMPILER_PATH}:$PATH" +ENV LD_LIBRARY_PATH="${COMPILER_LD_LIBRARY_PATH}:${LD_LIBRARY_PATH:-}" + +RUN echo "TARGET=$TARGET CC=$CC_COMPILER FC=$FC_COMPILER" && \ + cd /opt/MFC && \ + if [ "$TARGET" = "gpu" ]; then \ + ./mfc.sh build --gpu -j $(nproc); \ + else \ + ./mfc.sh build -j $(nproc); \ + fi + +RUN cd /opt/MFC && \ + if [ "$TARGET" = "gpu" ]; then \ + ./mfc.sh test --dry-run --gpu -j $(nproc); \ + else \ + ./mfc.sh test --dry-run -j $(nproc); \ + fi + +WORKDIR /opt/MFC +ENTRYPOINT ["tail", "-f", "/dev/null"] \ No newline at end of file diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 0000000000..b12c6cdc5f 
--- /dev/null +++ b/.github/workflows/docker.yml @@ -0,0 +1,132 @@ +name: Containerization + +on: + release: + types: [published] + workflow_dispatch: + inputs: + tag: + description: 'tag to containerize' + required: true + +concurrency: + group: Containerization + cancel-in-progress: false + +jobs: + Container: + strategy: + matrix: + config: + - { name: 'cpu', runner: 'ubuntu-22.04', base_image: 'ubuntu:22.04' } + - { name: 'gpu', runner: 'ubuntu-22.04', base_image: 'nvcr.io/nvidia/nvhpc:23.11-devel-cuda_multi-ubuntu22.04' } + - { name: 'gpu', runner: 'ubuntu-22.04-arm', base_image: 'nvcr.io/nvidia/nvhpc:23.11-devel-cuda_multi-ubuntu22.04' } + runs-on: ${{ matrix.config.runner }} + outputs: + tag: ${{ steps.clone.outputs.tag }} + steps: + - name: Free Disk Space + uses: jlumbroso/free-disk-space@main + with: + tool-cache: false + android: true + dotnet: true + haskell: true + large-packages: true + docker-images: true + swap-storage: true + + - name: Login + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + + - name: Setup Buildx + uses: docker/setup-buildx-action@v3 + + - name: Setup QEMU + uses: docker/setup-qemu-action@v3 + + - name: Clone + id: clone + run: | + TAG="${{ github.event.inputs.tag || github.ref_name }}" + echo "tag=$TAG" >> $GITHUB_OUTPUT + echo "TAG=$TAG" >> $GITHUB_ENV + git clone --branch "$TAG" --depth 1 https://github.com/MFlowCode/MFC.git mfc + + - name: Stage + run: | + sudo fallocate -l 8G /swapfile + sudo chmod 600 /swapfile + sudo mkswap /swapfile + sudo swapon /swapfile + sudo mkdir -p /home/runner/tmp + export TMPDIR=/home/runner/tmp + free -h + sudo mkdir -p /mnt/share + sudo chmod 777 /mnt/share + cp -r mfc/* /mnt/share/ + cp -r mfc/.git /mnt/share/.git + cp mfc/.github/Dockerfile /mnt/share/ + cp mfc/.github/.dockerignore /mnt/share/ + docker buildx create --name mfcbuilder --driver docker-container --use + + - name: Build and push image (cpu) + if: ${{ matrix.config.name == 'cpu' }} + uses: docker/build-push-action@v6 + with: + builder: mfcbuilder + context: /mnt/share + file: /mnt/share/Dockerfile + platforms: linux/amd64,linux/arm64 + build-args: | + BASE_IMAGE=${{ matrix.config.base_image }} + TARGET=${{ matrix.config.name }} + CC_COMPILER=${{ 'gcc' }} + CXX_COMPILER=${{ 'g++' }} + FC_COMPILER=${{ 'gfortran' }} + COMPILER_PATH=${{ '/usr/bin' }} + COMPILER_LD_LIBRARY_PATH=${{ '/usr/lib' }} + tags: ${{ secrets.DOCKERHUB_USERNAME }}/mfc:${{ env.TAG }}-${{ matrix.config.name }} + push: true + + - name: Build and push image (gpu) + if: ${{ matrix.config.name == 'gpu' }} + uses: docker/build-push-action@v5 + with: + builder: default + context: /mnt/share + file: /mnt/share/Dockerfile + build-args: | + BASE_IMAGE=${{ matrix.config.base_image }} + TARGET=${{ matrix.config.name }} + CC_COMPILER=${{ 'nvc' }} + CXX_COMPILER=${{ 'nvc++' }} + FC_COMPILER=${{ 'nvfortran' }} + COMPILER_PATH=${{ '/opt/nvidia/hpc_sdk/Linux_x86_64/compilers/bin' }} + COMPILER_LD_LIBRARY_PATH=${{ '/opt/nvidia/hpc_sdk/Linux_x86_64/compilers/lib' }} + tags: ${{ secrets.DOCKERHUB_USERNAME }}/mfc:${{ env.TAG }}-${{ matrix.config.name }}-${{ matrix.config.runner}} + push: true + + manifests: + runs-on: ubuntu-latest + needs: Container + steps: + - name: Login + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + + - name: Create and Push Manifest Lists + env: + TAG: ${{ needs.Container.outputs.tag }} + REGISTRY: ${{ 
secrets.DOCKERHUB_USERNAME }}/mfc + run: | + docker buildx imagetools create -t $REGISTRY:latest-cpu $REGISTRY:$TAG-cpu + docker manifest create $REGISTRY:$TAG-gpu $REGISTRY:$TAG-gpu-ubuntu-22.04 $REGISTRY:$TAG-gpu-ubuntu-22.04-arm + docker manifest create $REGISTRY:latest-gpu $REGISTRY:$TAG-gpu-ubuntu-22.04 $REGISTRY:$TAG-gpu-ubuntu-22.04-arm + docker manifest push $REGISTRY:$TAG-gpu + docker manifest push $REGISTRY:latest-gpu \ No newline at end of file diff --git a/README.md b/README.md index 196315ee20..4d547f7d75 100644 --- a/README.md +++ b/README.md @@ -84,8 +84,11 @@ And here is a high amplitude acoustic wave reflecting and emerging through a cir ## Getting started +For a quick start, open a GitHub Codespace to load a pre-configured Docker container and get familiar with MFC commands. Click <> Code (green button at top right) → Codespaces (right tab) → + (create a codespace). -You can navigate [to this webpage](https://mflowcode.github.io/documentation/md_getting-started.html) to get started using MFC! +**Note:** Codespaces is a free service with a monthly quota of compute time and storage. It is recommended for testing commands, troubleshooting, and running simple case files without needing to install dependencies or build MFC on your device. Remember to save any important files locally before closing your codespace. To learn more, read through [how Docker & containers work](https://mflowcode.github.io/documentation/docker.html). + +Otherwise, you can navigate [to this webpage](https://mflowcode.github.io/documentation/md_getting-started.html) to get started using MFC! It's rather straightforward. We'll give a brief intro. here for MacOS. Using [brew](https://brew.sh), install MFC's dependencies: diff --git a/docs/documentation/docker.md b/docs/documentation/docker.md new file mode 100644 index 0000000000..531e3f4b18 --- /dev/null +++ b/docs/documentation/docker.md @@ -0,0 +1,182 @@ +# Docker + +## Navigating Docker Desktop/CLI + +- Install Docker on [Mac](https://docs.docker.com/desktop/setup/install/mac-install/), [Windows](https://docs.docker.com/desktop/setup/install/windows-install/), or [Linux](https://docs.docker.com/desktop/setup/install/linux/). + +Via the Docker Desktop GUI, +- Search for the [sbryngelson/mfc](https://hub.docker.com/r/sbryngelson/mfc) repository, where all MFC images are stored, then pull a release tag (e.g. `latest-cpu`). + + Read through **Tag Details** below to distinguish between them. Docker Desktop's left sidebar has two key tabs: **Images** stores your local copies of pulled images, while **Containers** shows instances of those images. You can launch multiple containers from a single image. + +- Start a container by clicking the Run button in the Images tab. + + Use the *Exec* section to interact with MFC directly via a terminal, the *Files* section to transfer files between your device and the container, and the *Stats* section to monitor the container's resource usage. + +Or, via the Docker CLI, + +- Pull from the [sbryngelson/mfc](https://hub.docker.com/r/sbryngelson/mfc) repository and run the latest MFC container. + +```bash +docker run -it --rm --entrypoint bash sbryngelson/mfc:latest-cpu +``` +
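+
+If you prefer, you can pull the image explicitly first and confirm it is available locally before starting a container. This is a plain Docker workflow sketch with no MFC-specific options assumed:
+```bash
+# download the image from Docker Hub without starting it
+docker pull sbryngelson/mfc:latest-cpu
+# list the locally available MFC images and their tags
+docker images sbryngelson/mfc
+# start an interactive shell in a disposable container
+docker run -it --rm --entrypoint bash sbryngelson/mfc:latest-cpu
+```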
+ +**Selecting OS/ARCH:** + +By default, Docker selects the architecture compatible with your host when pulling and running an image. However, you can manually specify your platform (e.g. `linux/amd64` for most devices or `linux/arm64` for Apple Silicon). +```bash +docker run -it --rm --entrypoint bash --platform linux/amd64 sbryngelson/mfc:latest-cpu +``` +
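+
+If you want to verify which architecture was actually selected (native or emulated), one option is to print the machine name from inside the image; `uname` ships with the Ubuntu-based images, so this sketch should apply to any of the tags:
+```bash
+# expected to print aarch64 for the arm64 variant
+docker run --rm --platform linux/arm64 --entrypoint uname sbryngelson/mfc:latest-cpu -m
+# expected to print x86_64 for the amd64 variant
+docker run --rm --platform linux/amd64 --entrypoint uname sbryngelson/mfc:latest-cpu -m
+```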
+ +## Running Containers + +Start a CPU container: +```bash +docker run -it --rm --entrypoint bash sbryngelson/mfc:latest-cpu +``` +Start a GPU container: +```bash +docker run -it --rm --entrypoint bash --gpus all sbryngelson/mfc:latest-gpu +``` +**Note:** `--gpus all` gives the container access to all available GPUs; only NVIDIA GPUs are currently supported. Make sure your host supports CUDA 12.3 or newer to avoid compatibility issues. + +*⚠️ On **Apple Silicon** (ARM-based architecture), append the `--debug` option to `./mfc.sh` commands inside the container to bypass potential errors, or run `./mfc.sh clean && ./mfc.sh build` if needed.*
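+
+To confirm that the GPUs are actually visible inside the container, a quick sanity check (assuming the NVIDIA driver and NVIDIA Container Toolkit are installed on the host) is to run `nvidia-smi` in a throwaway container:
+```bash
+docker run --rm --gpus all --entrypoint nvidia-smi sbryngelson/mfc:latest-gpu
+```
+If the usual `nvidia-smi` table appears, MFC's GPU builds can see the devices; otherwise, check the host driver and container toolkit installation.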
+
+ +**Mounting a Directory:** + +Mount a host directory to `/mnt` inside the container to easily transfer files between the host and the container, e.g. `cp -r <files> /mnt/` (see the example below). +```bash +docker run -it --rm --entrypoint bash -v "$PWD":/mnt sbryngelson/mfc:latest-cpu +``` +
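+
+For instance, a typical session might run one of the bundled example cases and then copy the results back to the host through the mount. The case path below is only for illustration; substitute any case directory present in your MFC checkout:
+```bash
+# inside the container (the working directory is /opt/MFC)
+./mfc.sh run examples/1D_sodshocktube/case.py
+# copy the case directory, including its output, to the mounted host directory
+cp -r examples/1D_sodshocktube /mnt/
+```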
+ +**Shared Memory:** + +Increase the shared memory size to prevent MPI memory-binding errors, which may cause some tests and cases to fail. Alternatively, you can disable MPI inside the container with the `--no-mpi` build option (see the example after the portability steps below). +```bash +docker run -it --rm --entrypoint bash --shm-size=<size> sbryngelson/mfc:latest-cpu +``` + + +### For Portability + +On the source machine, +- Pull and save the image +```bash +docker pull sbryngelson/mfc:latest-cpu +docker save -o mfc:latest-cpu.tar sbryngelson/mfc:latest-cpu +``` +On the target machine, +- Load and run the image +```bash +docker load -i mfc:latest-cpu.tar +docker run -it --rm --entrypoint bash sbryngelson/mfc:latest-cpu +``` +
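+
+As a concrete illustration of the shared-memory options mentioned above, the sketch below starts a container with an explicitly enlarged shared-memory segment and, as an alternative, rebuilds MFC without MPI. The `2g` size is an assumed value; size it to your workload:
+```bash
+# option 1: enlarge /dev/shm so MPI runs do not hit memory-binding errors (2g is an assumed value)
+docker run -it --rm --entrypoint bash --shm-size=2g sbryngelson/mfc:latest-cpu
+
+# option 2: inside the container, rebuild MFC without MPI support
+./mfc.sh build --no-mpi -j $(nproc)
+```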
+ +## HPC Cluster Usage (Apptainer/Singularity) + +### Interactive Shell +```bash +apptainer exec --fakeroot --writable-tmpfs --bind "$PWD":/mnt docker://sbryngelson/mfc:latest-gpu bash -c "cd /opt/MFC && bash" +``` +or +```bash +apptainer shell --fakeroot --writable-tmpfs --bind "$PWD":/mnt docker://sbryngelson/mfc:latest-gpu +Apptainer> cd /opt/MFC +``` + +### For Portability +On the source machine, +- Pull and translate the image into `.sif` format +```bash +apptainer build mfc:latest-gpu.sif docker://sbryngelson/mfc:latest-gpu +``` +On the target machine, +- Load and start an interactive shell +```bash +apptainer shell --fakeroot --writable-tmpfs --bind "$PWD":/mnt mfc:latest-gpu.sif +``` + + +### Slurm Job +```bash +#!/bin/bash +#SBATCH --job-name=mfc-sim +#SBATCH --nodes=2 +#SBATCH --ntasks-per-node=12 +#SBATCH --time=06:00:00 +#SBATCH --partition=<partition> +#SBATCH --output=mfc-sim-%j.out +#SBATCH --error=mfc-sim-%j.err + +# Load required modules +module load apptainer + +cd $SLURM_SUBMIT_DIR + +# Define container image +CONTAINER="mfc:latest-gpu.sif" + +apptainer exec --fakeroot --writable-tmpfs \ +--bind "$PWD":/mnt \ + $CONTAINER \ + bash -c "cd /opt/MFC && ./mfc.sh run sim/case.py -c " +``` +Where + +the `sim/` directory contains all simulation files, including the case setup (`case.py`). + +`--fakeroot --writable-tmpfs` are critical to: +- Enable root-like permissions inside the container without actual root access +- Allow temporary write access to the container filesystem + +Add `--nv` to the `apptainer exec` command if the job should use NVIDIA GPUs. + + +## Tag Details +### Base Images +- CPU images (releases v4.3.0 and later) are built on **Ubuntu 22.04**. +- GPU images (releases v4.3.0 and later) are built on **NVHPC SDK 23.11 (CUDA 12.3) & Ubuntu 22.04**. + +### Tag Structure +- **`vx.x.x`** - Official MFC release version (recommended: use the `latest` release). +- **`cpu/gpu`** - Build configuration for CPU or GPU acceleration. +- **`ubuntu-xx.xx`** - Base Ubuntu version (standard = `amd64`, `-arm` = `arm64`). + +### Available Tags +``` +mfc:latest-xxx # Latest version (amd64 & arm64) +mfc:vx.x.x-cpu # CPU version (amd64 & arm64) +mfc:vx.x.x-gpu # GPU version (amd64 & arm64) +mfc:vx.x.x-xxx-ubuntu-xx.xx # amd64 natively-supported version +mfc:vx.x.x-xxx-ubuntu-xx.xx-arm # arm64 natively-supported version +``` +### Architecture Support +You can specify the desired architecture with `--platform <os>/<arch>` - either `linux/amd64` or `linux/arm64`. If you are not sure, Docker automatically selects the available image compatible with your system architecture. If native support isn't available, QEMU emulation is used for the following architectures, albeit with degraded performance. +``` +linux/amd64 +linux/amd64/v2 +linux/amd64/v3 +linux/arm64 +linux/riscv64 +linux/ppc64le +linux/s390x +linux/386 +linux/mips64le +linux/mips64 +linux/loong64 +linux/arm/v7 +linux/arm/v6 +``` + +
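+
+For example, to force a particular platform at pull time and confirm what you received, the standard Docker CLI commands below can be used with any of the tags listed above:
+```bash
+# pull the arm64 variant explicitly (QEMU emulation is used if the host is amd64)
+docker pull --platform linux/arm64 sbryngelson/mfc:latest-cpu
+# print the OS/architecture of the local image; expect linux/arm64 here
+docker image inspect --format '{{.Os}}/{{.Architecture}}' sbryngelson/mfc:latest-cpu
+```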
+
diff --git a/docs/documentation/getting-started.md b/docs/documentation/getting-started.md index 885c528cb9..c88b5978c4 100644 --- a/docs/documentation/getting-started.md +++ b/docs/documentation/getting-started.md @@ -145,6 +145,16 @@ Examples: - Build MFC using a single thread without MPI, GPU, and Debug support: `./mfc.sh build --no-mpi`. - Build MFC's `simulation` code in Debug mode with MPI and GPU support: `./mfc.sh build --debug --gpu -t simulation`. +## Using Containers +As an alternative to building MFC from scratch, you can use containers to quickly access a pre-built MFC with all of its dependencies. + +Run the latest MFC container: +```bash +docker run -it --rm --entrypoint bash sbryngelson/mfc:latest-cpu +``` +Please refer to the [Docker](https://mflowcode.github.io/documentation/docker.html) documentation page for more information. + + ## Running the Test Suite Run MFC's test suite with 8 threads: