diff --git a/.bazelci/cache_test.sh b/.bazelci/cache_test.sh index 678fc72019..53f05964cf 100755 --- a/.bazelci/cache_test.sh +++ b/.bazelci/cache_test.sh @@ -5,7 +5,7 @@ # We ensure that the system can build a set of bazel targets. # Run redis container -docker run -d --name buildfarm-redis --network host redis:5.0.9 --bind localhost +docker run -d --name buildfarm-redis --network host redis:7.2.4 --bind localhost # Build a container for buildfarm services cp `which bazel` bazel diff --git a/.bazelci/docker_unit_test.sh b/.bazelci/docker_unit_test.sh index 84d8881508..13ed343f2d 100755 --- a/.bazelci/docker_unit_test.sh +++ b/.bazelci/docker_unit_test.sh @@ -4,4 +4,4 @@ # Build a container for unit tests and run them cp `which bazel` bazel docker build -t buildfarm . -docker run buildfarm /bin/bash -c "cd buildfarm; ./bazel test --build_tests_only --test_tag_filters=-integration,-redis ..." \ No newline at end of file +docker run buildfarm /bin/bash -c "cd buildfarm; ./bazel test --build_tests_only --test_tag_filters=-container,-integration,-redis ..." \ No newline at end of file diff --git a/.bazelci/format.sh b/.bazelci/format.sh index f1255c9c2e..794360086e 100755 --- a/.bazelci/format.sh +++ b/.bazelci/format.sh @@ -5,11 +5,9 @@ FORMAT_JAVA=true REMOVE_NEWLINES_AFTER_START_BRACKET=true -JAVA_FORMATTER_URL=https://github.com/google/google-java-format/releases/download/google-java-format-1.7/google-java-format-1.7-all-deps.jar +JAVA_FORMATTER_URL=https://github.com/google/google-java-format/releases/download/v1.20.0/google-java-format-1.20.0-all-deps.jar LOCAL_FORMATTER="java_formatter.jar" -FORMAT_PROTO=true -CLANG_FORMAT=@llvm_toolchain//:clang-format if [ -z "$BAZEL" ]; then BAZEL=bazel fi @@ -71,20 +69,6 @@ run_java_formatter () { java -jar $LOCAL_FORMATTER -i $files } -run_proto_formatter () { - # Check whether any formatting changes need to be made. - # This is intended to be done by the CI. - if [[ "$@" == "--check" ]] - then - find $PWD -name '*.proto' -exec $BAZEL run $CLANG_FORMAT -- -i --dry-run --Werror {} + - handle_format_error_check - return - fi - - # Fixes formatting issues - find $PWD -name '*.proto' -exec $BAZEL run $CLANG_FORMAT -- -i {} + -} - run_buildifier () { $BAZEL run $BUILDIFIER -- -r > /dev/null 2>&1 } @@ -93,10 +77,6 @@ if [ "${FORMAT_JAVA:-false}" = true ]; then run_java_formatter "$@" fi; -if [ "${FORMAT_PROTO:-false}" = true ]; then - run_proto_formatter "$@" -fi; - if [ "${FORMAT_BUILD:-false}" = true ]; then run_buildifier "$@" fi; diff --git a/.bazelci/integration_test.sh b/.bazelci/integration_test.sh index 9a9acf8c14..e3cf5344f3 100755 --- a/.bazelci/integration_test.sh +++ b/.bazelci/integration_test.sh @@ -5,7 +5,7 @@ # We ensure that the system can build a set of bazel targets. # Run redis container -docker run -d --name buildfarm-redis --network host redis:5.0.9 --bind localhost +docker run -d --name buildfarm-redis --network host redis:7.2.4 --bind localhost # Build a container for buildfarm services cp `which bazel` bazel diff --git a/.bazelci/presubmit.yml b/.bazelci/presubmit.yml index 98b121e215..fdc0fa986d 100644 --- a/.bazelci/presubmit.yml +++ b/.bazelci/presubmit.yml @@ -1,5 +1,5 @@ --- -buildifier: 4.2.5 +buildifier: 6.4.0 tasks: # Linting jobs @@ -41,6 +41,8 @@ tasks: name: "Unit Tests" build_targets: - "..." + build_flags: + - "--build_tag_filters=-container" test_flags: - "--test_tag_filters=-integration,-redis" test_targets: @@ -49,28 +51,34 @@ tasks: name: "Unit Tests" build_targets: - "..." 
+ build_flags: + - "--build_tag_filters=-container" test_flags: - "--test_tag_filters=-integration,-redis" test_targets: - "..." macos: name: "Unit Tests" + environment: + USE_BAZEL_VERSION: 17be878292730359c9c90efdceabed26126df7ae build_flags: + - "--cxxopt=-std=c++14" - "--build_tag_filters=-container" build_targets: - "..." test_flags: - - "--test_tag_filters=-integration,-redis" + - "--test_tag_filters=-container,-integration,-redis" test_targets: - "..." windows: name: "Unit Tests" build_flags: - - "--build_tag_filters=-container,-audit" + - "--build_tag_filters=-container" build_targets: - "..." test_flags: - - "--test_tag_filters=-integration,-redis" + - "--@rules_jvm_external//settings:stamp_manifest=False" + - "--test_tag_filters=-container,-integration,-redis" test_targets: - "..." rpm_builds: diff --git a/.bazelci/redis_unit_tests.sh b/.bazelci/redis_unit_tests.sh index 4eddd046ef..d3298c34ff 100755 --- a/.bazelci/redis_unit_tests.sh +++ b/.bazelci/redis_unit_tests.sh @@ -3,7 +3,9 @@ # However this runs unit tests that interact directly with redis. # Run redis container -docker run -d --rm --name buildfarm-redis --network host redis:5.0.9 --bind localhost +docker run -d --rm --name buildfarm-redis --network host redis:7.2.4 --bind localhost # Run tests that rely on redis bazel test --build_tests_only --test_tag_filters=redis src/test/java/... + +docker stop buildfarm-redis diff --git a/.bazelci/run_server_test.sh b/.bazelci/run_server_test.sh index 21b6d70389..6169e63b5c 100755 --- a/.bazelci/run_server_test.sh +++ b/.bazelci/run_server_test.sh @@ -1,18 +1,18 @@ #!/bin/bash # Start redis container -docker run -d --rm --name buildfarm-redis --network host redis:5.0.9 --bind localhost +docker run -d --rm --name buildfarm-redis --network host redis:7.2.4 --bind localhost # Build worker and server targets bazel build //src/main/java/build/buildfarm:buildfarm-shard-worker bazel build //src/main/java/build/buildfarm:buildfarm-server # Start a single worker -bazel run //src/main/java/build/buildfarm:buildfarm-shard-worker $(pwd)/examples/config.minimal.yml > server.log 2>&1 & +bazel run //src/main/java/build/buildfarm:buildfarm-shard-worker $(pwd)/examples/config.minimal.yml > worker.log 2>&1 & echo "Started buildfarm-shard-worker..." # Start a single server -bazel run //src/main/java/build/buildfarm:buildfarm-server $(pwd)/examples/config.minimal.yml > worker.log 2>&1 & +bazel run //src/main/java/build/buildfarm:buildfarm-server $(pwd)/examples/config.minimal.yml > server.log 2>&1 & echo "Started buildfarm-server..." echo "Wait for startup to finish..." diff --git a/.bazelrc b/.bazelrc index cfea58ba6a..3ca98330c3 100644 --- a/.bazelrc +++ b/.bazelrc @@ -1,3 +1,10 @@ +build --java_language_version=17 +build --java_runtime_version=remotejdk_17 + +build --tool_java_language_version=17 +build --tool_java_runtime_version=remotejdk_17 + + common --enable_platform_specific_config build:fuse --define=fuse=true @@ -14,3 +21,11 @@ test --test_tag_filters=-redis,-integration # Ensure buildfarm is compatible with future versions of bazel. # https://buildkite.com/bazel/bazelisk-plus-incompatible-flags common --incompatible_disallow_empty_glob + +common --enable_bzlmod + +# See also https://bazel.build/external/lockfile. +common --lockfile_mode=off +# It's off because we have mac/windows/linux developers who may not have access +# to all three to update the platform-specific bits of the lockfile. 
+ diff --git a/.bazelversion b/.bazelversion index 5e3254243a..21c8c7b46b 100644 --- a/.bazelversion +++ b/.bazelversion @@ -1 +1 @@ -6.1.2 +7.1.1 diff --git a/.github/workflows/buildfarm-helm-chart-lint.yml b/.github/workflows/buildfarm-helm-chart-lint.yml new file mode 100644 index 0000000000..c301732145 --- /dev/null +++ b/.github/workflows/buildfarm-helm-chart-lint.yml @@ -0,0 +1,24 @@ +--- +name: Lint Helm Chart + +on: + push: + paths: + - kubernetes/helm-charts/buildfarm/** + +env: + CHART_ROOT: kubernetes/helm-charts/buildfarm + +jobs: + lint: + name: Lint Helm Chart + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: helm-lint + name: Lint Helm Chart + run: |- + set -ex + helm dep up "${CHART_ROOT}" + helm lint "${CHART_ROOT}" diff --git a/.github/workflows/buildfarm-helm-chart-publish.yml b/.github/workflows/buildfarm-helm-chart-publish.yml new file mode 100644 index 0000000000..322634368f --- /dev/null +++ b/.github/workflows/buildfarm-helm-chart-publish.yml @@ -0,0 +1,58 @@ +--- +name: Package and Publish Helm Chart + +on: + push: + tags: + - 'helm/*' + +env: + CHART_NAME: buildfarm + CHART_ROOT: ${{ github.workspace }}/kubernetes/helm-charts/buildfarm + GHCR_REPO: ghcr.io/${{ github.repository_owner }} + +jobs: + build: + name: Lint, Package, and Release BuildFarm Helm Chart + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: get-chart-ver + name: Extracting Helm Chart Version from Tag + run: | + set -ex + echo "chart_ver=$(echo $GITHUB_REF | cut -d / -f 4)" >> $GITHUB_OUTPUT + - id: set-chart-yaml-version + name: Etching Helm Chart Version into Chart.yaml for Packaging + run: | + set -ex + echo setting Chart version to \ + "${{ steps.get-chart-ver.outputs.chart_ver }}" \ + in ${CHART_ROOT}/Chart.yaml + yq -i \ + '.version |= "${{ steps.get-chart-ver.outputs.chart_ver }}"' \ + ${CHART_ROOT}/Chart.yaml + - id: helm-lint + name: Helm Chart Lint + run: |- + set -ex + helm dep up "${CHART_ROOT}" + helm lint "${CHART_ROOT}" + - id: helm-bundle-push + name: Helm Chart Bundle and Push + run: |- + set -e + echo ${{ secrets.GITHUB_TOKEN }} | \ + helm registry \ + login "${GHCR_REPO}" \ + --username "${{ github.repository_owner }}" \ + --password-stdin + set -ex + helm dep up "${CHART_ROOT}" + helm package "${CHART_ROOT}" + export CHART_BUNDLE="${CHART_NAME}-${{ steps.get-chart-ver.outputs.chart_ver }}.tgz" + ls -l "${CHART_BUNDLE}" + helm push \ + "${CHART_BUNDLE}" \ + "oci://${GHCR_REPO}" diff --git a/.github/workflows/buildfarm-images-build-and-deploy.yml b/.github/workflows/buildfarm-images-build-and-deploy.yml new file mode 100644 index 0000000000..3e72ae34a6 --- /dev/null +++ b/.github/workflows/buildfarm-images-build-and-deploy.yml @@ -0,0 +1,31 @@ +name: Build and Push Latest Buildfarm Images + +on: + push: + branches: + - main + +jobs: + build: + if: github.repository == 'bazelbuild/bazel-buildfarm' + name: Build Buildfarm Images + runs-on: ubuntu-latest + steps: + - uses: bazelbuild/setup-bazelisk@v2 + + - name: Checkout + uses: actions/checkout@v3 + + - name: Login to Bazelbuild Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.BAZELBUILD_DOCKERHUB_USERNAME }} + password: ${{ secrets.BAZELBUILD_DOCKERHUB_TOKEN }} + + - name: Build Server Image + id: buildAndPushServerImage + run: bazel run public_push_buildfarm-server -- --tag latest + + - name: Build Worker Image + id: buildAndPushWorkerImage + run: bazel run public_push_buildfarm-worker -- --tag latest diff --git 
a/.github/workflows/buildfarm-release-build-and-deploy.yml b/.github/workflows/buildfarm-release-build-and-deploy.yml new file mode 100644 index 0000000000..db9fe8dcf1 --- /dev/null +++ b/.github/workflows/buildfarm-release-build-and-deploy.yml @@ -0,0 +1,30 @@ +name: Build and Push Buildfarm Releases + +on: + release: + types: [published] + +jobs: + build: + if: github.repository == 'bazelbuild/bazel-buildfarm' + name: Build Buildfarm Images + runs-on: ubuntu-latest + steps: + - uses: bazelbuild/setup-bazelisk@v2 + + - name: Checkout + uses: actions/checkout@v3 + + - name: Login to Bazelbuild Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.BAZELBUILD_DOCKERHUB_USERNAME }} + password: ${{ secrets.BAZELBUILD_DOCKERHUB_TOKEN }} + + - name: Build Server Image + id: buildAndPushServerImage + run: bazel run public_push_buildfarm-server -- --tag ${{ github.event.release.tag_name }} + + - name: Build Worker Image + id: buildAndPushWorkerImage + run: bazel run public_push_buildfarm-worker -- --tag ${{ github.event.release.tag_name }} diff --git a/.github/workflows/buildfarm-worker-base-build-and-deploy.yml b/.github/workflows/buildfarm-worker-base-build-and-deploy.yml new file mode 100644 index 0000000000..a212e5e61a --- /dev/null +++ b/.github/workflows/buildfarm-worker-base-build-and-deploy.yml @@ -0,0 +1,39 @@ +name: Build and Push Base Buildfarm Worker Images + +on: + push: + branches: + - main + paths: + - ci/base-worker-image/jammy/Dockerfile + - ci/base-worker-image/mantic/Dockerfile +jobs: + build: + if: github.repository == 'bazelbuild/bazel-buildfarm' + name: Build Base Buildfarm Worker Image + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Login to Bazelbuild Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.BAZELBUILD_DOCKERHUB_USERNAME }} + password: ${{ secrets.BAZELBUILD_DOCKERHUB_TOKEN }} + + - name: Build Jammy Docker image + uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 + with: + context: . + file: ./ci/base-worker-image/jammy/Dockerfile + push: true + tags: bazelbuild/buildfarm-worker-base:jammy + + - name: Build Mantic Docker image + uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 + with: + context: . + file: ./ci/base-worker-image/mantic/Dockerfile + push: true + tags: bazelbuild/buildfarm-worker-base:mantic diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 0000000000..e2c75ddef5 --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,50 @@ +name: CodeQL + +# Declare default permissions as read only. +permissions: read-all + +on: + pull_request: + branches: [main] + push: + branches: + - main + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + + permissions: + security-events: write + + strategy: + matrix: + language: ["java-kotlin"] + + steps: + - uses: bazel-contrib/setup-bazel@0.8.1 + with: + # Avoid downloading Bazel every time. + bazelisk-cache: true + + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + build-mode: manual + + - name: Build java + # Note: Bazel requires specific args to do the build with as little caching as possible. Kind of the antithesis of Bazel's philosophy, + # but CodeQL wants to observe all the compilation.
+ # See also: https://docs.github.com/en/enterprise-cloud@latest/code-security/codeql-cli/getting-started-with-the-codeql-cli/preparing-your-code-for-codeql-analysis#specifying-build-commands + run: | + bazel build --spawn_strategy=local --nouse_action_cache --noremote_accept_cached --noremote_upload_local_results //src/main/java/build/buildfarm:buildfarm-server //src/main/java/build/buildfarm:buildfarm-shard-worker + bazel shutdown + + - uses: github/codeql-action/analyze@v3 + with: + category: "/language:${{ matrix.language }}" diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 064cde87fe..a4facca3c2 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -20,7 +20,7 @@ jobs: ${{ runner.os }}-gems- # Use GitHub Deploy Action to build and deploy to Github - - uses: jeffreytse/jekyll-deploy-action@v0.4.0 + - uses: jeffreytse/jekyll-deploy-action@v0.5.0 with: provider: 'github' token: ${{ secrets.GH_TOKEN }} # It's your Personal Access Token(PAT) diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml new file mode 100644 index 0000000000..04f97ff17e --- /dev/null +++ b/.github/workflows/scorecards.yml @@ -0,0 +1,68 @@ +name: Scorecards supply-chain security +on: + # For Branch-Protection check. Only the default branch is supported. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection + branch_protection_rule: + # To guarantee Maintained check is occasionally updated. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained + schedule: + - cron: "23 2 * * 5" + push: + branches: ["main"] + +# Declare default permissions as read only. +permissions: read-all + +jobs: + analysis: + name: Scorecards analysis + runs-on: ubuntu-latest + permissions: + # Needed to upload the results to code-scanning dashboard. + security-events: write + # Needed to publish results and get a badge (see publish_results below). + id-token: write + # Uncomment the permissions below if installing in a private repository. + # contents: read + # actions: read + + steps: + - name: "Checkout code" + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + with: + persist-credentials: false + + - name: "Run analysis" + uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1 + with: + results_file: results.sarif + results_format: sarif + # (Optional) "write" PAT token. Uncomment the `repo_token` line below if: + # - you want to enable the Branch-Protection check on a *public* repository, or + # - you are installing Scorecards on a *private* repository + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat. + # repo_token: ${{ secrets.SCORECARD_TOKEN }} + + # Public repositories: + # - Publish results to OpenSSF REST API for easy access by consumers + # - Allows the repository to include the Scorecard badge. + # - See https://github.com/ossf/scorecard-action#publishing-results. + # For private repositories: + # - `publish_results` will always be set to `false`, regardless + # of the value entered here. + publish_results: true + + # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF + # format to the repository Actions tab. + - name: "Upload artifact" + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard. 
+ - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@4759df8df70c5ebe7042c3029bbace20eee13edd # v2.23.1 + with: + sarif_file: results.sarif diff --git a/AUTHORS b/AUTHORS index f9314dfc35..ed0de30898 100644 --- a/AUTHORS +++ b/AUTHORS @@ -9,3 +9,4 @@ Uber Technologies Inc. Aurora Innovation, Inc. VMware, Inc. +Salesforce, Inc. diff --git a/BUILD b/BUILD index c2308b862a..a1f6749aaa 100644 --- a/BUILD +++ b/BUILD @@ -1,11 +1,9 @@ -load("@com_github_bazelbuild_buildtools//buildifier:def.bzl", "buildifier") -load("@io_bazel_rules_docker//java:image.bzl", "java_image") -load("@io_bazel_rules_docker//docker/package_managers:download_pkgs.bzl", "download_pkgs") -load("@io_bazel_rules_docker//docker/package_managers:install_pkgs.bzl", "install_pkgs") -load("@io_bazel_rules_docker//container:container.bzl", "container_image") -load("@rules_oss_audit//oss_audit:java/oss_audit.bzl", "oss_audit") +load("@buildifier_prebuilt//:rules.bzl", "buildifier") +load("@rules_oci//oci:defs.bzl", "oci_image", "oci_image_index", "oci_push", "oci_tarball") +load("@rules_pkg//:pkg.bzl", "pkg_tar") +load("@rules_pkg//pkg:mappings.bzl", "pkg_attributes", "pkg_files") load("//:jvm_flags.bzl", "server_jvm_flags", "worker_jvm_flags") -load("@rules_pkg//pkg:tar.bzl", "pkg_tar") +load("//container:defs.bzl", "oci_image_env") package(default_visibility = ["//visibility:public"]) @@ -20,6 +18,17 @@ buildifier( # For example, "debgging tools", "introspection tools", and "exeution wrappers" are examples of dependencies # that many need included within deployed containers. This BUILD file creates docker images that bundle # additional dependencies alongside the buildfarm agents. +ARCH = [ + # keep sorted + # "aarch64", # TODO + "amd64", +] + +DEFAULT_IMAGE_LABELS = { + "org.opencontainers.image.source": "https://github.com/bazelbuild/bazel-buildfarm", +} + +DEFAULT_PACKAGE_DIR = "app/build_buildfarm" # == Execution Wrappers == # Execution wrappers are programs that buildfarm chooses to use when running REAPI actions. They are used for @@ -35,53 +44,47 @@ buildifier( # bazel version is used in buildfarm agents as is used by bazel clients. There has not been any known issues due # to version mismatch, but we state the possibility here. Some execution wrappers will not be compatible with all # operating systems. We make a best effort and ensure they all work in the below images. 
-java_library( +pkg_tar( name = "execution_wrappers", - data = [ - ":as-nobody", - ":delay", - #":linux-sandbox.binary", # Darwin build is broken - ":macos-wrapper", - #":process-wrapper.binary", # Darwin build is broken - ":skip_sleep.binary", - ":skip_sleep.preload", - ":tini.binary", + srcs = [ + ":exec-wrapper-files", + ":exec-wrapper-helpers", ], + package_dir = DEFAULT_PACKAGE_DIR, + tags = ["container"], ) -java_library( +pkg_tar( name = "telemetry_tools", - data = [ + srcs = [ ":opentelemetry-javaagent", ], + package_dir = DEFAULT_PACKAGE_DIR, + tags = ["container"], ) -genrule( - name = "process-wrapper.binary", - srcs = ["@bazel//src/main/tools:process-wrapper"], - outs = ["process-wrapper"], - cmd = "cp $< $@;", -) - -genrule( - name = "linux-sandbox.binary", - srcs = ["@bazel//src/main/tools:linux-sandbox"], - outs = ["linux-sandbox"], - cmd = "cp $< $@;", -) - -genrule( +pkg_files( name = "tini.binary", srcs = ["@tini//file"], - outs = ["tini"], - cmd = "cp $< $@ && chmod +x $@", + attributes = pkg_attributes( + mode = "0555", + ), + renames = { + "@tini//file": "tini", + }, + tags = ["container"], ) -genrule( +pkg_files( name = "opentelemetry-javaagent", srcs = ["@opentelemetry//jar"], - outs = ["opentelemetry-javaagent.jar"], - cmd = "cp $< $@;", + attributes = pkg_attributes( + mode = "0444", + ), + renames = { + "@opentelemetry//jar": "opentelemetry-javaagent.jar", + }, + tags = ["container"], ) cc_binary( @@ -90,118 +93,169 @@ cc_binary( "//config:windows": ["as-nobody-windows.c"], "//conditions:default": ["as-nobody.c"], }), + tags = ["container"], ) -genrule( - name = "skip_sleep.binary", - srcs = ["@skip_sleep"], - outs = ["skip_sleep"], - cmd = "cp $< $@;", +pkg_files( + name = "exec-wrapper-files", + srcs = [ + ":as-nobody", + "@bazel//src/main/tools:linux-sandbox", + "@bazel//src/main/tools:process-wrapper", + "@skip_sleep", + # The delay wrapper is only intended to be used with the "skip_sleep" wrapper. + "delay.sh", + "macos-wrapper.sh", + ], + attributes = pkg_attributes( + mode = "0555", + ), + tags = ["container"], ) -genrule( - name = "skip_sleep.preload", +pkg_files( + name = "exec-wrapper-helpers", srcs = ["@skip_sleep//:skip_sleep_preload"], - outs = ["skip_sleep_preload.so"], - cmd = "cp $< $@;", -) - -# The delay wrapper is only intended to be used with the "skip_sleep" wrapper. 
-sh_binary( - name = "delay", - srcs = ["delay.sh"], + attributes = pkg_attributes( + mode = "0444", + ), + prefix = DEFAULT_PACKAGE_DIR, + renames = { + "@skip_sleep//:skip_sleep_preload": "skip_sleep_preload.so", + }, + tags = ["container"], ) -sh_binary( - name = "macos-wrapper", - srcs = ["macos-wrapper.sh"], +pkg_tar( + name = "layer_tini_amd64", + srcs = [":tini.binary"], + tags = ["container"], ) -# Docker images for buildfarm components -java_image( - name = "buildfarm-server", - args = ["/app/build_buildfarm/examples/config.minimal.yml"], - base = "@amazon_corretto_java_image_base//image", - classpath_resources = [ - "//src/main/java/build/buildfarm:configs", - ], - data = [ - "//examples:example_configs", - "//src/main/java/build/buildfarm:configs", - ], - jvm_flags = server_jvm_flags(), - main_class = "build.buildfarm.server.BuildFarmServer", +pkg_tar( + name = "layer_buildfarm_server", + srcs = ["//src/main/java/build/buildfarm:buildfarm-server_deploy.jar"], + package_dir = DEFAULT_PACKAGE_DIR, tags = ["container"], - runtime_deps = [ - ":telemetry_tools", - "//src/main/java/build/buildfarm/server", - ], ) -oss_audit( - name = "buildfarm-server-audit", - src = "//src/main/java/build/buildfarm:buildfarm-server", - tags = ["audit"], +pkg_tar( + name = "layer_buildfarm_worker", + srcs = ["//src/main/java/build/buildfarm:buildfarm-shard-worker_deploy.jar"], + package_dir = DEFAULT_PACKAGE_DIR, + tags = ["container"], ) -# A worker image may need additional packages installed that are not in the base image. -# We use download/install rules to extend an upstream image. -# Download cgroup-tools so that the worker is able to restrict actions via control groups. -download_pkgs( - name = "worker_pkgs", - image_tar = "@ubuntu-bionic//image", - packages = ["cgroup-tools"], +pkg_tar( + name = "layer_minimal_config", + srcs = ["@build_buildfarm//examples:example_configs"], + package_dir = DEFAULT_PACKAGE_DIR, tags = ["container"], ) -install_pkgs( - name = "worker_pkgs_image", - image_tar = "@ubuntu-bionic//image", - installables_tar = ":worker_pkgs.tar", - installation_cleanup_commands = "rm -rf /var/lib/apt/lists/*", - output_image_name = "worker_pkgs_image", +pkg_tar( + name = "layer_logging_config", + srcs = ["@build_buildfarm//src/main/java/build/buildfarm:configs"], + package_dir = DEFAULT_PACKAGE_DIR + "/src/main/java/build/buildfarm", tags = ["container"], ) -# This becomes the new base image when creating worker images. 
-container_image( - name = "worker_pkgs_image_wrapper", - base = ":worker_pkgs_image.tar", - tags = ["container"], +oci_image_env( + name = "env_server", + configpath = "/" + DEFAULT_PACKAGE_DIR + "/config.minimal.yml", + jvm_args = server_jvm_flags(), ) -java_image( - name = "buildfarm-shard-worker", - args = ["/app/build_buildfarm/examples/config.minimal.yml"], - base = ":worker_pkgs_image_wrapper", - classpath_resources = [ - "//src/main/java/build/buildfarm:configs", - ], - data = [ - "//examples:example_configs", - "//src/main/java/build/buildfarm:configs", +oci_image( + name = "buildfarm-server_linux_amd64", + base = "@amazon_corretto_java_image_base", + entrypoint = [ + "java", + "-jar", + "/" + DEFAULT_PACKAGE_DIR + "/buildfarm-server_deploy.jar", ], - jvm_flags = worker_jvm_flags(), - main_class = "build.buildfarm.worker.shard.Worker", + env = ":env_server", + labels = DEFAULT_IMAGE_LABELS, tags = ["container"], - runtime_deps = [ - ":execution_wrappers", + tars = [ + # do not sort + ":layer_logging_config", + ":layer_minimal_config", ":telemetry_tools", - "//src/main/java/build/buildfarm/worker/shard", + ":layer_buildfarm_server", ], ) -oss_audit( - name = "buildfarm-shard-worker-audit", - src = "//src/main/java/build/buildfarm:buildfarm-shard-worker", - tags = ["audit"], +oci_image_env( + name = "env_worker", + configpath = "/" + DEFAULT_PACKAGE_DIR + "/config.minimal.yml", + jvm_args = worker_jvm_flags(), ) -pkg_tar( - name = "buildfarm-shard-worker-tar", - srcs = [ - "//examples:example_configs", - "//src/main/java/build/buildfarm:buildfarm-shard-worker_deploy.jar", - "//src/main/java/build/buildfarm:configs", +oci_image( + name = "buildfarm-worker_linux_amd64", + base = "@ubuntu_mantic", + entrypoint = [ + # do not sort + "/tini", + "--", + "java", + "-jar", + "/" + DEFAULT_PACKAGE_DIR + "/buildfarm-shard-worker_deploy.jar", + ], + env = ":env_worker", + labels = DEFAULT_IMAGE_LABELS, + tags = ["container"], + tars = [ + # do not sort + ":layer_tini_amd64", + ":layer_logging_config", + ":layer_minimal_config", + ":execution_wrappers", + ":telemetry_tools", + ":layer_buildfarm_worker", ], ) + +[ + oci_image_index( + name = "buildfarm-%s" % image, + images = [ + ":buildfarm-%s_linux_%s" % (image, arch) + for arch in ARCH + ], + tags = ["container"], + ) + for image in [ + "server", + "worker", + ] +] + +###### +# Helpers to write to the local Docker Desktop's registry +# Usage: `bazel run //:tarball_server_amd64 && docker run --rm buildfarm-server:amd64` +###### +[ + [ + oci_tarball( + name = "tarball_%s_%s" % (image, arch), + image = ":buildfarm-%s_linux_%s" % (image, arch), + repo_tags = ["buildfarm-%s:%s" % (image, arch)], + tags = ["container"], + ), + # Below targets push public docker images to bazelbuild dockerhub. 
+ oci_push( + name = "public_push_buildfarm-%s" % image, + image = ":buildfarm-%s" % image, + repository = "index.docker.io/bazelbuild/buildfarm-%s" % image, + # Specify the tag with `bazel run public_push_buildfarm-server public_push_buildfarm-worker -- --tag latest` + tags = ["container"], + ), + ] + for arch in ARCH + for image in [ + "server", + "worker", + ] +] diff --git a/CODEOWNERS b/CODEOWNERS index 6ee2ef6c3e..f755f1edff 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1 +1 @@ -* @werkt @ulfjack +* @werkt diff --git a/CONTRIBUTORS b/CONTRIBUTORS index 1eb0fc98cd..9961c13001 100644 --- a/CONTRIBUTORS +++ b/CONTRIBUTORS @@ -13,3 +13,4 @@ George Gensure Yuriy Belenitsky Trevor Hickey Jacob Mou +Jason Schroeder diff --git a/Dockerfile b/Dockerfile index 11c3379edc..2e49d697f7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,8 +7,8 @@ FROM ubuntu:18.04 RUN echo 'APT::Acquire::Retries "5";' > /etc/apt/apt.conf.d/80retries RUN apt-get update -RUN apt-get -y install wget git python gcc openjdk-8-jdk g++ redis redis-server +RUN apt-get -y install wget git zip python gcc openjdk-8-jdk g++ redis redis-server RUN wget --tries=10 -O get-pip.py https://bootstrap.pypa.io/pip/2.7/get-pip.py RUN python2 get-pip.py -RUN pip install python-dateutil +RUN pip install python-dateutil==2.8.2 COPY . buildfarm diff --git a/MODULE.bazel b/MODULE.bazel new file mode 100644 index 0000000000..094d71aa1a --- /dev/null +++ b/MODULE.bazel @@ -0,0 +1,209 @@ +"""Buildfarm build and test""" + +module( + name = "build_buildfarm", + repo_name = "build_buildfarm", +) + +bazel_dep(name = "bazel_skylib", version = "1.5.0") +bazel_dep(name = "blake3", version = "1.3.3.bcr.1") +bazel_dep(name = "buildifier_prebuilt", version = "6.4.0") +bazel_dep(name = "gazelle", version = "0.35.0", repo_name = "bazel_gazelle") +bazel_dep(name = "grpc-java", version = "1.62.2") +bazel_dep(name = "googleapis", version = "0.0.0-20240326-1c8d509c5", repo_name = "com_google_googleapis") +bazel_dep(name = "platforms", version = "0.0.8") +bazel_dep(name = "protobuf", version = "23.1", repo_name = "com_google_protobuf") +bazel_dep(name = "rules_cc", version = "0.0.9") +bazel_dep(name = "rules_go", version = "0.46.0", repo_name = "io_bazel_rules_go") +bazel_dep(name = "rules_java", version = "7.4.0") +bazel_dep(name = "rules_jvm_external", version = "6.0") +bazel_dep(name = "rules_license", version = "0.0.7") +bazel_dep(name = "rules_oci", version = "1.7.4") +bazel_dep(name = "rules_pkg", version = "0.10.1") +bazel_dep(name = "rules_proto", version = "6.0.0-rc2") + +# Test dependencies +bazel_dep( + name = "container_structure_test", + version = "1.16.0", + dev_dependency = True, +) + +# TODO: remove this after https://github.com/bazelbuild/remote-apis/pull/293 is merged +bazel_dep(name = "remoteapis", version = "eb433accc6a666b782ea4b787eb598e5c3d27c93") +archive_override( + module_name = "remoteapis", + integrity = "sha256-68wzxNAkPZ49/zFwPYQ5z9MYbgxoeIEazKJ24+4YqIQ=", + strip_prefix = "remote-apis-eb433accc6a666b782ea4b787eb598e5c3d27c93", + urls = [ + "https://github.com/bazelbuild/remote-apis/archive/eb433accc6a666b782ea4b787eb598e5c3d27c93.zip", + ], +) + +IO_NETTY_MODULES = [ + "buffer", + "codec", + "codec-http", + "codec-http2", + "codec-socks", + "common", + "handler", + "handler-proxy", + "resolver", + "transport", + "transport-native-epoll", + "transport-native-kqueue", + "transport-native-unix-common", +] + +IO_GRPC_MODULES = [ + "api", + "auth", + "core", + "context", + "netty", + "stub", + "protobuf", + "testing", + "services", + 
"netty-shaded", +] + +COM_AWS_MODULES = [ + "s3", + "secretsmanager", +] + +maven = use_extension("@rules_jvm_external//:extensions.bzl", "maven") +maven.install( + artifacts = ["com.amazonaws:aws-java-sdk-%s:1.12.544" % module for module in COM_AWS_MODULES] + [ + "com.fasterxml.jackson.core:jackson-databind:2.15.0", + "com.github.ben-manes.caffeine:caffeine:2.9.0", + "com.github.docker-java:docker-java:3.3.3", + "com.github.fppt:jedis-mock:1.0.13", + "com.github.jnr:jffi:1.3.11", + "com.github.jnr:jffi:jar:native:1.3.11", + "com.github.jnr:jnr-constants:0.10.4", + "com.github.jnr:jnr-ffi:2.2.14", + "com.github.jnr:jnr-posix:3.1.17", + "com.github.luben:zstd-jni:1.5.5-7", + "com.github.oshi:oshi-core:6.4.5", + "com.github.pcj:google-options:1.0.0", + "com.github.serceman:jnr-fuse:0.5.7", + "com.google.auth:google-auth-library-credentials:1.19.0", + "com.google.auth:google-auth-library-oauth2-http:1.19.0", + "com.google.code.findbugs:jsr305:3.0.2", + "com.google.code.gson:gson:2.10.1", + "com.google.errorprone:error_prone_annotations:2.22.0", + "com.google.errorprone:error_prone_core:2.22.0", + "com.google.guava:failureaccess:1.0.1", + "com.google.guava:guava:32.1.1-jre", + "com.google.j2objc:j2objc-annotations:2.8", + "com.google.jimfs:jimfs:1.3.0", + "com.google.protobuf:protobuf-java-util:3.19.1", + "com.google.protobuf:protobuf-java:3.19.1", + "com.google.truth:truth:1.1.5", + "com.googlecode.json-simple:json-simple:1.1.1", + "com.jayway.jsonpath:json-path:2.8.0", + "net.jcip:jcip-annotations:1.0", + "org.bouncycastle:bcprov-jdk15on:1.70", + "org.slf4j:slf4j-simple:2.0.9", + ] + ["io.netty:netty-%s:4.1.97.Final" % module for module in IO_NETTY_MODULES] + + ["io.grpc:grpc-%s:1.62.2" % module for module in IO_GRPC_MODULES] + [ + "io.prometheus:simpleclient:0.15.0", + "io.prometheus:simpleclient_hotspot:0.15.0", + "io.prometheus:simpleclient_httpserver:0.15.0", + "javax.annotation:javax.annotation-api:1.3.2", + "junit:junit:4.13.2", + "me.dinowernli:java-grpc-prometheus:0.6.0", + "net.javacrumbs.future-converter:future-converter-java8-guava:1.2.0", + "org.apache.commons:commons-compress:1.23.0", + "org.apache.commons:commons-lang3:3.13.0", + "org.apache.commons:commons-pool2:2.11.1", + "org.apache.tomcat:annotations-api:6.0.53", + "org.bouncycastle:bcprov-jdk15on:1.70", + "org.checkerframework:checker-qual:3.38.0", + "org.jetbrains:annotations:16.0.2", + "org.mockito:mockito-core:5.10.0", + "org.openjdk.jmh:jmh-core:1.37", + "org.openjdk.jmh:jmh-generator-annprocess:1.37", + "org.projectlombok:lombok:1.18.30", + "org.redisson:redisson:3.23.4", + "org.slf4j:slf4j-simple:2.0.9", + "org.threeten:threetenbp:1.6.8", + "org.xerial:sqlite-jdbc:3.34.0", + "org.yaml:snakeyaml:2.2", + "redis.clients:jedis:5.1.2", + ], + fail_if_repin_required = True, # TO RE-PIN: REPIN=1 bazel run @unpinned_maven//:pin + generate_compat_repositories = True, + lock_file = "//:maven_install.json", + repositories = [ + "https://repo.maven.apache.org/maven2", + ], + strict_visibility = False, # True breaks aws jars +) +use_repo( + maven, + "maven", + "unpinned_maven", +) + +oci = use_extension("@rules_oci//oci:extensions.bzl", "oci") + +# Server base image +oci.pull( + # This is a multi-arch image! + name = "amazon_corretto_java_image_base", + digest = "sha256:f0e6040a09168500a1e96d02fef42a26176aaec8e0f136afba081366cb98e2f6", # tag:21 as of today. 
+ image = "public.ecr.aws/amazoncorretto/amazoncorretto", + platforms = [ + "linux/amd64", + "linux/arm64/v8", + ], +) + +# Worker base image +oci.pull( + name = "ubuntu_mantic", + digest = "sha256:2520e0725493c8f63452dd8aa153fbf0b489a9442096b7693641193709a765b7", # tag: mantic + image = "index.docker.io/bazelbuild/buildfarm-worker-base", +) +use_repo( + oci, + "amazon_corretto_java_image_base", + "ubuntu_mantic", +) + +# https://github.com/bazelbuild/rules_python/pull/713#issuecomment-1885628496 +# Satisfy running tests in Docker as root. +bazel_dep(name = "rules_python", version = "0.31.0") + +python = use_extension("@rules_python//python/extensions:python.bzl", "python") +python.toolchain( + configure_coverage_tool = False, + ignore_root_user_error = True, + python_version = "3.11", +) + +build_deps = use_extension("//:extensions.bzl", "build_deps") +use_repo( + build_deps, + "bazel", + "io_grpc_grpc_proto", + "opentelemetry", + "skip_sleep", + "tini", +) + +googleapis_switched_rules = use_extension("@com_google_googleapis//:extensions.bzl", "switched_rules") +googleapis_switched_rules.use_languages( + grpc = True, + java = True, +) +use_repo(googleapis_switched_rules, "com_google_googleapis_imports") + +find_rpm = use_extension("@rules_pkg//toolchains/rpm:rpmbuild_configure.bzl", "find_system_rpmbuild_bzlmod") +use_repo(find_rpm, "rules_pkg_rpmbuild") + +register_toolchains("@rules_pkg_rpmbuild//:all") diff --git a/README.md b/README.md index 8dc99874b3..6813ab4545 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,11 @@ # Bazel Buildfarm ![Build status](https://badge.buildkite.com/45f4fd4c0cfb95f7705156a4119641c6d5d6c310452d6e65a4.svg?branch=main) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/bazelbuild/bazel-buildfarm/badge)](https://securityscorecards.dev/viewer/?uri=github.com/bazelbuild/bazel-buildfarm) +![GitHub License](https://img.shields.io/github/license/bazelbuild/bazel-buildfarm) +![GitHub Release](https://img.shields.io/github/v/release/bazelbuild/bazel-buildfarm) +![Docker Pulls](https://img.shields.io/docker/pulls/bazelbuild/buildfarm-server) + This repository hosts the [Bazel](https://bazel.build) remote caching and execution system. @@ -19,8 +24,8 @@ All commandline options override corresponding config settings. Run via -``` -docker run -d --rm --name buildfarm-redis -p 6379:6379 redis:5.0.9 +```shell +$ docker run -d --rm --name buildfarm-redis -p 6379:6379 redis:7.2.4 redis-cli config set stop-writes-on-bgsave-error no ``` @@ -28,10 +33,10 @@ redis-cli config set stop-writes-on-bgsave-error no Run via -``` -bazelisk run //src/main/java/build/buildfarm:buildfarm-server -- +```shell +$ bazelisk run //src/main/java/build/buildfarm:buildfarm-server -- -Ex: bazelisk run //src/main/java/build/buildfarm:buildfarm-server -- --jvm_flag=-Dlogging.config=file:$PWD/examples/logging.properties $PWD/examples/config.minimal.yml +Ex: bazelisk run //src/main/java/build/buildfarm:buildfarm-server -- --jvm_flag=-Djava.util.logging.config.file=$PWD/examples/logging.properties $PWD/examples/config.minimal.yml ``` **`logfile`** has to be in the [standard java util logging format](https://docs.oracle.com/cd/E57471_01/bigData.100/data_processing_bdd/src/rdp_logging_config.html) and passed as a --jvm_flag=-Dlogging.config=file: **`configfile`** has to be in [yaml format](https://bazelbuild.github.io/bazel-buildfarm/docs/configuration). 
@@ -40,10 +45,10 @@ Ex: bazelisk run //src/main/java/build/buildfarm:buildfarm-server -- --jvm_flag= Run via -``` -bazelisk run //src/main/java/build/buildfarm:buildfarm-shard-worker -- +```shell +$ bazelisk run //src/main/java/build/buildfarm:buildfarm-shard-worker -- -Ex: bazelisk run //src/main/java/build/buildfarm:buildfarm-shard-worker -- --jvm_flag=-Dlogging.config=file:$PWD/examples/logging.properties $PWD/examples/config.minimal.yml +Ex: bazelisk run //src/main/java/build/buildfarm:buildfarm-shard-worker -- --jvm_flag=-Djava.util.logging.config.file=$PWD/examples/logging.properties $PWD/examples/config.minimal.yml ``` **`logfile`** has to be in the [standard java util logging format](https://docs.oracle.com/cd/E57471_01/bigData.100/data_processing_bdd/src/rdp_logging_config.html) and passed as a --jvm_flag=-Dlogging.config=file: @@ -53,9 +58,9 @@ Ex: bazelisk run //src/main/java/build/buildfarm:buildfarm-shard-worker -- --jvm To use the example configured buildfarm with bazel (version 1.0 or higher), you can configure your `.bazelrc` as follows: -``` +```shell $ cat .bazelrc -build --remote_executor=grpc://localhost:8980 +$ build --remote_executor=grpc://localhost:8980 ``` Then run your build as you would normally do. @@ -67,20 +72,20 @@ Buildfarm uses [Java's Logging framework](https://docs.oracle.com/javase/10/core You can use typical Java logging configuration to filter these results and observe the flow of executions through your running services. An example `logging.properties` file has been provided at [examples/logging.properties](examples/logging.properties) for use as follows: -``` -bazel run //src/main/java/build/buildfarm:buildfarm-server -- --jvm_flag=-Dlogging.config=file:$PWD/examples/logging.properties $PWD/examples/config.minimal.yml +```shell +$ bazel run //src/main/java/build/buildfarm:buildfarm-server -- --jvm_flag=-Djava.util.logging.config.file=$PWD/examples/logging.properties $PWD/examples/config.minimal.yml ``` and -``` -bazel run //src/main/java/build/buildfarm:buildfarm-shard-worker -- --jvm_flag=-Dlogging.config=file:$PWD/examples/logging.properties $PWD/examples/config.minimal.yml +``` shell +$ bazel run //src/main/java/build/buildfarm:buildfarm-shard-worker -- --jvm_flag=-Djava.util.logging.config.file=$PWD/examples/logging.properties $PWD/examples/config.minimal.yml ``` To attach a remote debugger, run the executable with the `--debug=` flag. For example: -``` -bazel run //src/main/java/build/buildfarm:buildfarm-server -- --debug=5005 $PWD/examples/config.minimal.yml +```shell +$ bazel run //src/main/java/build/buildfarm:buildfarm-server -- --debug=5005 $PWD/examples/config.minimal.yml ``` @@ -132,3 +137,16 @@ load("@build_buildfarm//:images.bzl", "buildfarm_images") buildfarm_images() ``` + +### Helm Chart + +To install OCI bundled Helm chart: + +```bash +helm install \ + -n bazel-buildfarm \ + --create-namespace \ + bazel-buildfarm \ + oci://ghcr.io/bazelbuild/buildfarm \ + --version "0.2.4" +``` diff --git a/Tiltfile b/Tiltfile index 454f13253b..b706a79122 100644 --- a/Tiltfile +++ b/Tiltfile @@ -34,7 +34,7 @@ def server_deps(): # Inform tilt about the custom images built within the repository. # When you change code, these images will be re-built and re-deployed. 
custom_build( - ref='buildfarm-shard-worker-image', + ref='bazelbuild/buildfarm-worker', command=( 'bazelisk build --javabase=@bazel_tools//tools/jdk:remote_jdk11 //:buildfarm-shard-worker.tar && ' + 'docker load < bazel-bin/buildfarm-shard-worker.tar && ' + @@ -44,7 +44,7 @@ custom_build( deps = worker_deps() ) custom_build( - ref='buildfarm-server-image', + ref='bazelbuild/buildfarm-server', command=( 'bazelisk build --javabase=@bazel_tools//tools/jdk:remote_jdk11 //:buildfarm-server.tar && ' + 'docker load < bazel-bin/buildfarm-server.tar && ' + @@ -58,22 +58,4 @@ local_resource("unit tests",'bazelisk test --javabase=@bazel_tools//tools/jdk:re # Object definitions for kubernetes. # Tilt will automatically correlate them to any above docker images. -k8s_yaml(local('bazelisk run //kubernetes/deployments:kubernetes')) -k8s_yaml(local('bazelisk run //kubernetes/deployments:server')) -k8s_yaml(local('bazelisk run //kubernetes/deployments:shard-worker')) -k8s_yaml(local('bazelisk run //kubernetes/deployments:redis-cluster')) -k8s_yaml(local('bazelisk run //kubernetes/services:grafana')) -k8s_yaml(local('bazelisk run //kubernetes/services:redis-cluster')) -k8s_yaml(local('bazelisk run //kubernetes/services:shard-worker')) -k8s_yaml(local('bazelisk run //kubernetes/services:open-telemetry')) -k8s_yaml(local('bazelisk run //kubernetes/services:jaeger')) - -# Expose endpoints outside the kubernetes cluster. -k8s_resource('server', port_forwards=[8980,9092], labels="buildfarm-cluster") -k8s_resource('shard-worker', port_forwards=[8981,9091], labels="buildfarm-cluster") -k8s_resource('redis-cluster', port_forwards=6379, labels="buildfarm-cluster") -k8s_resource('otel-agent', labels="tracing") -k8s_resource('otel-collector', port_forwards=[4317,4318], labels="tracing") -k8s_resource('simplest', port_forwards=[14269,16686], labels="tracing") -k8s_resource('kubernetes-dashboard', port_forwards=8443) -k8s_resource('grafana', port_forwards=3000, labels="metrics") +k8s_yaml(helm('kubernetes/helm-charts/buildfarm')) diff --git a/WORKSPACE b/WORKSPACE index 0d4b9462d5..abf429653f 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -1,30 +1,3 @@ -workspace(name = "build_buildfarm") - -load(":deps.bzl", "buildfarm_dependencies") - -buildfarm_dependencies() - -load(":defs.bzl", "buildfarm_init") - -buildfarm_init() - -load("@rules_oss_audit//oss_audit:repositories.bzl", "rules_oss_audit_dependencies") - -rules_oss_audit_dependencies() - -load("@rules_oss_audit//oss_audit:setup.bzl", "rules_oss_audit_setup") - -rules_oss_audit_setup() - -load("@maven//:compat.bzl", "compat_repositories") - -compat_repositories() - -load(":images.bzl", "buildfarm_images") - -buildfarm_images() - -# Find rpmbuild if it exists. 
-load("@rules_pkg//toolchains/rpm:rpmbuild_configure.bzl", "find_system_rpmbuild") - -find_system_rpmbuild(name = "rules_pkg_rpmbuild") +# ================================================ # +# All dependencies have been moved to MODULE.bazel # +# ================================================ # diff --git a/_site/docs/architecture/content_addressable_storage.md b/_site/docs/architecture/content_addressable_storage.md index a8955b4ce3..41b50d9948 100644 --- a/_site/docs/architecture/content_addressable_storage.md +++ b/_site/docs/architecture/content_addressable_storage.md @@ -38,9 +38,9 @@ This is the example presentation of a CAS in the memory instance available [here ``` worker: - cas: - type: MEMORY - maxSizeBytes: 2147483648 # 2 * 1024 * 1024 * 1024 + storages: + - type: MEMORY + maxSizeBytes: 2147483648 # 2 * 1024 * 1024 * 1024 ``` ## GRPC @@ -53,9 +53,11 @@ A grpc config example is available in the alternate instance specification in th server: name: shard worker: - cas: - type: GRPC - target: + storages: + - type: FILESYSTEM + path: "cache" + - type: GRPC + target: ``` ## HTTP/1 @@ -89,11 +91,10 @@ The CASFileCache is also available on MemoryInstance servers, where it can repre ``` worker: - cas: - type: FILESYSTEM - path: "cache" - maxSizeBytes: 2147483648 # 2 * 1024 * 1024 * 1024 - maxEntrySizeBytes: 2147483648 # 2 * 1024 * 1024 * 1024 + storages: + - type: FILESYSTEM + path: "cache" + maxSizeBytes: 2147483648 # 2 * 1024 * 1024 * 1024 ``` CASTest is a standalone tool to load the cache and print status information about it. diff --git a/_site/docs/architecture/queues.md b/_site/docs/architecture/queues.md index 872b689735..4ee44764bf 100644 --- a/_site/docs/architecture/queues.md +++ b/_site/docs/architecture/queues.md @@ -25,32 +25,35 @@ If your configuration file does not specify any provisioned queues, buildfarm wi This will ensure the expected behavior for the paradigm in which all work is put on the same queue. ### Matching Algorithm -The matching algorithm is performed by the operation queue when the caller is requesting to push or pop elements. +The matching algorithm is performed by the operation queue when the server or worker is requesting to push or pop elements, respectively. The matching algorithm is designed to find the appropriate queue to perform these actions on. On the scheduler side, the action's platform properties are used for matching. On the worker side, the `dequeue_match_settings` are used. ![Operation Queue Matching]({{site.url}}{{site.baseurl}}/assets/images/Operation-Queue-Matching1.png) -This is how the matching algorithm works: +The matching algorithm works as follows: Each provision queue is checked in the order that it is configured. The first provision queue that is deemed eligible is chosen and used. When deciding if an action is eligible for the provision queue, each platform property is checked individually. By default, there must be a perfect match on each key/value. Wildcards ("*") can be used to avoid the need of a perfect match. Additionally, if the action contains any platform properties is not mentioned by the provision queue, it will be deemed ineligible. -setting `allow_unmatched: true` can be used to allow a superset of action properties as long as a subset matches the provision queue. +setting `allowUnmatched: true` can be used to allow a superset of action properties as long as a subset matches the provision queue. 
If no provision queues can be matched, the operation queue will provide an analysis of why none of the queues were eligible. -When taking elements off of the operation queue, the matching algorithm behaves a similar way. -The worker's `DequeueMatchSettings` also have an `allow_unmatched` property. -Workers also have the ability to reject an operation after matching with a provision queue and dequeuing a value. -To avoid any of these rejections by the worker, you can use `accept_everything: true`. - -When configuring your worker, consider the following decisions: -First, if the accept_everything setting is true, the job is accepted. -Otherwise, if any execution property for the queue has a wildcard key, the job is accepted. -Otherwise, if the allow_unmatched setting is true, each key present in the queue's properties must be a wildcard or exist in the execution request's properties with an equal value. -Otherwise, the execution request's properties must have exactly the same set of keys as the queue's execution properties, and the request's value for each property must equal the queue's if the queue's value for this property is not a wildcard. +A worker will dequeue operations from matching queues and determine whether to keep and execute them according to the following procedure: +For each property key-value in the operation's platform, an operation is REJECTED if: + The key is `min-cores` and the integer value is greater than the number of cores on the worker. + Or the key is `min-mem` and the integer value is greater than the number of bytes of RAM on the worker. + Or the key exists in the `DequeueMatchSettings` platform with neither the value nor a `*` among the corresponding DMS platform key's values, + Or the key does not appear in the `DequeueMatchSettings` platform and the `allowUnmatched` setting is `false`. +For each resource requested in the operation's platform with the `resource:` prefix, the action is rejected if: + The resource amount cannot currently be satisfied with the associated resource capacity count. + +There are special predefined execution property names which resolve to dynamic configuration for the worker to match against: +`Worker`: The worker's `publicName` +`min-cores`: Less than or equal to the `executeStageWidth` +`process-wrapper`: The set of named `process-wrappers` present in configuration ### Server Example diff --git a/_site/docs/architecture/worker-execution-environment.md b/_site/docs/architecture/worker-execution-environment.md index 89bee694fa..a49343fda3 100644 --- a/_site/docs/architecture/worker-execution-environment.md +++ b/_site/docs/architecture/worker-execution-environment.md @@ -1,6 +1,6 @@ --- layout: default -title: Workers +title: Worker Execution Environment parent: Architecture nav_order: 3 --- @@ -124,4 +124,4 @@ java_image( And now that this is in place, we can use the following to build the container and make it available to our local docker daemon: -`bazel run :buildfarm-shard-worker-ubuntu20-java14` \ No newline at end of file +`bazel run :buildfarm-shard-worker-ubuntu20-java14` diff --git a/_site/docs/architecture/workers.md b/_site/docs/architecture/workers.md index 28765b6a7f..ee3e67f8ad 100644 --- a/_site/docs/architecture/workers.md +++ b/_site/docs/architecture/workers.md @@ -7,20 +7,21 @@ nav_order: 2 --- # Workers -Workers of all types throughout buildfarm are responsible for presenting execution roots to operations that they are matched with, fetching content from a CAS, executing those processes, and reporting the outputs and results of executions.
Additionally, buildfarm supports some common behaviors across worker types: +Workers have two major roles in Buildfarm: Execution and CAS Shard. Either of these options can be disabled, though a worker with both disabled provides no value. -* ExecutionPolicies, which allow for explicit and implicit behaviors to control execution. -* A CAS FileCache, which is capable of reading through content for Digests of files or directories, and efficiently presenting those contents based on usage and reference counting, as well as support for cascading into delegate CASs. -* Concurrent pipelined execution of operations, with support for superscalar stages at input fetch and execution. -* Operation exclusivity, preventing the same operation from running through the worker pipeline concurrently. +Regardless of role, a worker must have a local FILESYSTEM type [storage](https://bazelbuild.github.io/bazel-buildfarm/docs/configuration/configuration/#worker-cas) to retain content. This storage serves both as a resident LRU cache for Execution I/O, and as the local storage for a CAS Shard. Workers can delegate to successive storage declarations (FILESYSTEM or GRPC), with read-through or expiration waterfall if configured, but only the first storage entry will be used for Executions. -# Worker Types +## Execution -## Operation Queue +Execution Workers are responsible for matching their environments against operations, presenting execution roots to those operations, fetching content from a CAS, executing processes required to complete the operations, and reporting the outputs and results of executions. Control and delivery of these behaviors are accomplished with several mechanisms: -Operation Queue workers are responsible for taking operations from the Memory OperationQueue service and reporting their contents via external CAS and AC services. Executions are the only driving force for their CAS FileCache. For more details on configuring the operation queue, [see here](https://github.com/bazelbuild/bazel-buildfarm/wiki/Operation-Queue). +* A CAS FileCache, which is capable of reading through content for Digests of files or directories, and efficiently presenting those contents based on usage and reference counting, as well as support for cascading into delegate CASs. +* ExecutionPolicies, which allow for explicit and implicit behaviors to control execution. +* Execution Resources to limit concurrent execution in installation-defined resource tranches. +* Concurrent pipelined execution of operations, with support for superscalar stages at input fetch and execution. +* Operation exclusivity, preventing the same operation from running through the worker pipeline concurrently. -## Shard +## CAS Shard Sharded workers interact with the shard backplane for both execution and CAS presentation. Their CAS FileCache serves a CAS gRPC interface as well as the execution root factory. @@ -56,18 +57,20 @@ The Report Result stage injects any outputs from the operation into the CAS, and # Exec Filesystem -Workers must present Exec Filesystems for actions, and manage their existence for the lifetime of an operation's presence within the pipeline. The realization of an operation's execution root with the execution filesystem constitutes a transaction that the operating directory for an action will appear, be writable for outputs, and released and be made unavailable as it proceeds and exits the pipeline.
+Workers use ExecFileSystems to present content to actions, and manage their existence for the lifetime of an operation's presence within the pipeline. The realization of an operation's execution root with the execution filesystem constitutes a transaction that the operating directory for an action will appear, be writable for outputs, and released and be made unavailable as it proceeds and exits the pipeline. This means that an action's entire input directory must be available on a filesystem from a unique location per operation - the _Operation Action Input Root_, or just _Root_. Each input file within the Root must contain the content of the inputs, its requested executability via FileNode, and each directory must contain, at the outset, child input files and directories. The filesystem is free to handle unspecified outputs as it sees fit, but the directory hierarchy of output files from the Root must be created before execution, and writable during it. When execution and observation of the outputs is completed, the exec filesystem will be asked to destroy the Root and release any associated resources from its retention. -There are two implementations of Execution Filesystem in Buildfarm. Choosing either a `filesystem` or `fuse` `cas` type in the worker config as the first `cas` entry will choose the _CASFileCache_ or _FuseCAS_ implementations, respectively. +Choosing a `filesystem` `storage` type in the worker config as the first `storage` entry will select the _CASFileCache_ _CFCExecFileSystem_. Choosing any other `storage` type will create a _FuseCAS_ _FuseExecFilesystem_. + +***We strongly recommend the use of `filesystem` `storage` as the ExecFileSystem-selecting `storage` entry; the _FuseCAS_ is experimental and may not function reliably over long hauls or with substantial load.*** ## CASFileCache/CFCExecFilesystem The CASFileCache provides an Exec Filesystem via CFCExecFilesystem. The CASFileCache's retention of paths is used to reflect individual files, with these paths hard-linked in CFCExecFilesystem under representative directories of the input root to signify usage. The CASFileCache directory retention system is also used to provide a configurable utilization of entire directory trees as a symlink, which was a heuristic optimization applied when substantial cost was observed setting up static trees of input links for operations compared to their execution time. `link_input_directories` in the common Worker configuration will enable this heuristic. Outputs of actions are physically streamed into CAS writes when they are observed after an action execution. -The CASFileCache's persistence in the filesystem and the availability of common POSIX features like symlinks and inode-based reference counts on almost any filesystem implementation have made it a solid choice for extremely large CAS installations - it scales to multi-TB host attached storages with millions of entries with relative ease. +The CASFileCache's persistence in the filesystem and the availability of common POSIX features like symlinks and inode-based reference counts on almost any filesystem implementation have made it a solid choice for extremely large CAS installations - it scales to multi-TB host attached storages containing millions of entries with relative ease. There are plans to improve CASFileCache that will be reflected in improved performance and memory footprint for the features used by CFCExecFilesystem.
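As a sketch of the selection rule above, mirroring the storage examples in the CAS documentation (the path, size, and target values are placeholders), a worker that selects CFCExecFilesystem by listing a FILESYSTEM storage first and delegating read-through to a GRPC storage could be configured as:

```yaml
worker:
  storages:
    # The first entry selects the exec filesystem: FILESYSTEM -> CFCExecFilesystem
    - type: FILESYSTEM
      path: "cache"
      maxSizeBytes: 2147483648 # 2 * 1024 * 1024 * 1024
    # Delegate storage: read-through only, never used to present execution roots
    - type: GRPC
      target: "cas-shard.example.com:8981"
```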
@@ -75,4 +78,4 @@ There are plans to improve CASFileCache that will be reflected in improved perfo A fuse implementation to provide Roots exists and is specifiable as well. This was an experiment to discover the capacity of a fuse to represent Roots transparently with a ContentAddressableStorage backing, and has not been fully vetted to provide the same reliability as the CFCExecFilesystem. This system is capable of blinking entire trees into existence with ease, as well as supporting write-throughs for outputs suitable for general purpose execution. Some problems with this type were initially observed and never completely resolved, including guaranteed resource release on Root destruction. This implementation is also only built to be backed by its own Memory CAS, with no general purpose CAS support added due to the difficulty of supporting a transaction model for an input tree to enforce the contract of availability. It remains unoptimized yet functional, but difficulties with integrating libfuse 3 into the bazel build, as well as time constraints, have kept it from being scaled and expanded as the rest of Buildfarm has grown. -There are plans to revisit this implementation and bring it back into viability with a CASFileCache-like backing. \ No newline at end of file +There are plans to revisit this implementation and bring it back into viability with a CASFileCache-like backing. diff --git a/_site/docs/configuration/configuration.md b/_site/docs/configuration/configuration.md index c6df9b58d3..9dae48db8f 100644 --- a/_site/docs/configuration/configuration.md +++ b/_site/docs/configuration/configuration.md @@ -7,7 +7,7 @@ has_children: true Minimal required: -``` +```yaml backplane: redisUri: "redis://localhost:6379" queues: @@ -21,24 +21,25 @@ worker: publicName: "localhost:8981" ``` -The configuration can be provided to the server and worker as a CLI argument or through the env variable `CONFIG_PATH` -For an example config containing all of the configuration values, see `examples/config.yml`. +The configuration can be provided to the server and worker as a CLI argument or through the environment variable `CONFIG_PATH` +For an example configuration containing all of the configuration values, see `examples/config.yml`. 
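As a sketch of the two provisioning styles, using the example configuration referenced above:

```shell
# Configuration supplied as a CLI argument
$ bazel run //src/main/java/build/buildfarm:buildfarm-server -- $PWD/examples/config.minimal.yml

# Configuration supplied through the CONFIG_PATH environment variable
$ CONFIG_PATH=$PWD/examples/config.minimal.yml bazel run //src/main/java/build/buildfarm:buildfarm-server
```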
## All Configurations ### Common -| Configuration | Accepted and _Default_ Values | Description | -|----------------------|-------------------------------|---------------------------------------------------| -| digestFunction | _SHA256_, SHA1 | Digest function for this implementation | -| defaultActionTimeout | Integer, _600_ | Default timeout value for an action (seconds) | -| maximumActionTimeout | Integer, _3600_ | Maximum allowed action timeout (seconds) | -| maxEntrySizeBytes | Long, _2147483648_ | Maximum size of a single blob accepted (bytes) | -| prometheusPort | Integer, _9090_ | Listening port of the Prometheus metrics endpoint | +| Configuration | Accepted and _Default_ Values | Command Line Argument | Description | +|------------------------------|-------------------------------|-----------------------|--------------------------------------------------------------| +| digestFunction | _SHA256_, SHA1 | | Digest function for this implementation | +| defaultActionTimeout | Integer, _600_ | | Default timeout value for an action (seconds) | +| maximumActionTimeout | Integer, _3600_ | | Maximum allowed action timeout (seconds) | +| maxEntrySizeBytes | Long, _2147483648_ | | Maximum size of a single blob accepted (bytes) | +| prometheusPort | Integer, _9090_ | --prometheus_port | Listening port of the Prometheus metrics endpoint | +| allowSymlinkTargetAbsolute | boolean, _false_ | | Permit inputs to contain symlinks with absolute path targets | Example: -``` +```yaml digestFunction: SHA1 defaultActionTimeout: 1800 maximumActionTimeout: 1800 @@ -51,33 +52,35 @@ worker: ### Server -| Configuration | Accepted and _Default_ Values | Environment Var | Description | -|----------------------------------|-------------------------------|-------------------------|-----------------------------------------------------------------------------------------------------------------------------| -| instanceType | _SHARD_ | | Type of implementation (SHARD is the only one supported) | -| name | String, _shard_ | | Implementation name | -| publicName | String, _DERIVED:port_ | INSTANCE_NAME | Host:port of the GRPC server, required to be accessible by all servers | -| actionCacheReadOnly | boolean, _false_ | | Allow/Deny writing to action cache | -| port | Integer, _8980_ | | Listening port of the GRPC server | -| casWriteTimeout | Integer, _3600_ | | CAS write timeout (seconds) | -| bytestreamTimeout | Integer, _3600_ | | Byte Stream write timeout (seconds) | -| sslCertificatePath | String, _null_ | | Absolute path of the SSL certificate (if TLS used) | -| sslPrivateKeyPath | String, _null_ | | Absolute path of the SSL private key (if TLS used) | -| runDispatchedMonitor | boolean, _true_ | | Enable an agent to monitor the operation store to ensure that dispatched operations with expired worker leases are requeued | -| dispatchedMonitorIntervalSeconds | Integer, _1_ | | Dispatched monitor's lease expiration check interval (seconds) | -| runOperationQueuer | boolean, _true_ | | Aquire execute request entries cooperatively from an arrival queue on the backplane | -| ensureOutputsPresent | boolean, _false_ | | Decide if all outputs are also present in the CAS. 
If any outputs are missing a cache miss is returned | -| maxCpu | Integer, _0_ | | Maximum number of CPU cores that any min/max-cores property may request (0 = unlimited) | -| maxRequeueAttempts | Integer, _5_ | | Maximum number of requeue attempts for an operation | -| useDenyList | boolean, _true_ | | Allow usage of a deny list when looking up actions and invocations (for cache only it is recommended to disable this check) | -| grpcTimeout | Integer, _3600_ | | GRPC request timeout (seconds) | -| executeKeepaliveAfterSeconds | Integer, _60_ | | Execute keep alive (seconds) | -| recordBesEvents | boolean, _false_ | | Allow recording of BES events | -| clusterId | String, _local_ | | Buildfarm cluster ID | -| cloudRegion | String, _us-east_1_ | | Deployment region in the cloud | +| Configuration | Accepted and _Default_ Values | Environment Var | Description | +|----------------------------------|-------------------------------|-----------------|-----------------------------------------------------------------------------------------------------------------------------| +| instanceType | _SHARD_ | | Type of implementation (SHARD is the only one supported) | +| name | String, _shard_ | | Implementation name | +| publicName | String, _DERIVED:port_ | INSTANCE_NAME | Host:port of the GRPC server, required to be accessible by all servers | +| actionCacheReadOnly | boolean, _false_ | | Allow/Deny writing to action cache | +| port | Integer, _8980_ | | Listening port of the GRPC server | +| casWriteTimeout | Integer, _3600_ | | CAS write timeout (seconds) | +| bytestreamTimeout | Integer, _3600_ | | Byte Stream write timeout (seconds) | +| sslCertificatePath | String, _null_ | | Absolute path of the SSL certificate (if TLS used) | +| sslPrivateKeyPath | String, _null_ | | Absolute path of the SSL private key (if TLS used) | +| runDispatchedMonitor | boolean, _true_ | | Enable an agent to monitor the operation store to ensure that dispatched operations with expired worker leases are requeued | +| dispatchedMonitorIntervalSeconds | Integer, _1_ | | Dispatched monitor's lease expiration check interval (seconds) | +| runOperationQueuer | boolean, _true_ | | Acquire execute request entries cooperatively from an arrival queue on the backplane | +| ensureOutputsPresent | boolean, _false_ | | Decide if all outputs are also present in the CAS. 
If any outputs are missing a cache miss is returned | +| maxCpu | Integer, _0_ | | Maximum number of CPU cores that any min/max-cores property may request (0 = unlimited) | +| maxRequeueAttempts | Integer, _5_ | | Maximum number of requeue attempts for an operation | +| useDenyList | boolean, _true_ | | Allow usage of a deny list when looking up actions and invocations (for cache only it is recommended to disable this check) | +| grpcTimeout | Integer, _3600_ | | GRPC request timeout (seconds) | +| executeKeepaliveAfterSeconds | Integer, _60_ | | Execute keep alive (seconds) | +| recordBesEvents | boolean, _false_ | | Allow recording of BES events | +| clusterId | String, _local_ | | Buildfarm cluster ID | +| cloudRegion | String, _us-east_1_ | | Deployment region in the cloud | +| gracefulShutdownSeconds | Integer, 0 | | Time in seconds to allow for connections in flight to finish when shutdown signal is received | + Example: -``` +```yaml server: instanceType: SHARD name: shard @@ -91,28 +94,30 @@ server: |--------------------------|-------------------------------|--------------------------------------------------------| | enabled | boolean, _false_ | Publish basic GRPC metrics to a Prometheus endpoint | | provideLatencyHistograms | boolean, _false_ | Publish detailed, more expensive to calculate, metrics | +| labelsToReport | List of Strings, _[]_ | Include custom metrics labels in Prometheus metrics | Example: -``` +```yaml server: grpcMetrics: enabled: false provideLatencyHistograms: false + labelsToReport: [] ``` ### Server Caches -| Configuration | Accepted and _Default_ Values | Description | -|--------------------------|-------------------------------|--------------------------------------------------------| -| directoryCacheMaxEntries | Long, _64 * 1024_ | The max number of entries that the directory cache will hold. | -| commandCacheMaxEntries | Long, _64 * 1024_ | The max number of entries that the command cache will hold. | -| digestToActionCacheMaxEntries | Long, _64 * 1024_ | The max number of entries that the digest-to-action cache will hold. | -| recentServedExecutionsCacheMaxEntries | Long, _64 * 1024_ | The max number of entries that the executions cache will hold. | +| Configuration | Accepted and _Default_ Values | Description | +|---------------------------------------|-------------------------------|----------------------------------------------------------------------| +| directoryCacheMaxEntries | Long, _64 * 1024_ | The max number of entries that the directory cache will hold. | +| commandCacheMaxEntries | Long, _64 * 1024_ | The max number of entries that the command cache will hold. | +| digestToActionCacheMaxEntries | Long, _64 * 1024_ | The max number of entries that the digest-to-action cache will hold. | +| recentServedExecutionsCacheMaxEntries | Long, _64 * 1024_ | The max number of entries that the executions cache will hold. 
| Example: -``` +```yaml server: caches: directoryCacheMaxEntries: 10000 @@ -130,7 +135,7 @@ server: Example: -``` +```yaml server: admin: deploymentEnvironment: AWS @@ -149,14 +154,14 @@ server: Example: -``` +```yaml server: metrics: publisher: log logLevel: INFO ``` -``` +```yaml server: metrics: publisher: aws @@ -167,45 +172,47 @@ server: ### Redis Backplane -| Configuration | Accepted and _Default_ Values | Environment Var | Description | -|------------------------------|------------------------------------------|-----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| type | _SHARD_ | | Type of backplane. Currently, the only implemntation is SHARD utilizing Redis | -| redisUri | String, redis://localhost:6379 | REDIS_URI | Redis cluster endpoint. This must be a single URI | -| redisPassword | String, _null_ | | Redis password, if applicable | -| redisNodes | List of Strings, _null_ | | List of individual Redis nodes, if applicable | -| jedisPoolMaxTotal | Integer, _4000_ | | The size of the Redis connection pool | -| workersHashName | String, _Workers_ | | Redis key used to store a hash of registered workers | -| workerChannel | String, _WorkerChannel_ | | Redis pubsub channel key where changes of the cluster membership are announced | -| actionCachePrefix | String, _ActionCache_ | | Redis key prefix for all ActionCache entries | -| actionCacheExpire | Integer, _2419200_ | | The TTL maintained for ActionCache entries, not refreshed on getActionResult hit | -| actionBlacklistPrefix | String, _ActionBlacklist_ | | Redis key prefix for all blacklisted actions, which are rejected | -| actionBlacklistExpire | Integer, _3600_ | | The TTL maintained for action blacklist entries | -| invocationBlacklistPrefix | String, _InvocationBlacklist_ | | Redis key prefix for blacklisted invocations, suffixed with a a tool invocation ID | -| operationPrefix | String, _Operation_ | | Redis key prefix for all operations, suffixed wtih the operation's name | -| operationExpire | Integer, _604800_ | | The TTL maintained for all operations, updated on each modification | -| preQueuedOperationsListName | String, _{Arrival}:PreQueuedOperations_ | | Redis key used to store a list of ExecuteEntry awaiting transformation into QueryEntry | -| processingListName | String, _{Arrival}:ProcessingOperations_ | | Redis key of a list used to ensure reliable processing of arrival queue etries with operation watch monitoring | -| processingPrefix | String, _Processing_ | | Redis key prefix for operations which are being dequeued from the arrival queue | -| processingTimeoutMillis | Integer, _20000_ | | Delay (in ms) used to populate processing operation entries | -| queuedOperationsListName | String, _{Execution}:QueuedOperations_ | | Redis key used to store a list of QueueEntry awaiting execution by workers | -| dispatchingPrefix | String, _Dispatching_ | | Redis key prefix for operations which are being dequeued from the ready to run queue | -| dispatchingTimeoutMillis | Integer, _10000_ | | Delay (in ms) used to populate dispatching operation entries | -| dispatchedOperationsHashName | String, _DispatchedOperations_ | | Redis key of a hash of operation names to the worker lease for its execution, which are monitored by the dispatched monitor | -| operationChannelPrefix | String, _OperationChannel_ | | Redis pubsub channel prefix suffixed by an operation name | -| casPrefix | String, 
_ContentAddressableStorage_ | | Redis key prefix suffixed with a blob digest that maps to a set of workers with that blob's availability | -| casExpire | Integer, _604800_ | | The TTL maintained for CAS entries, which is not refreshed on any read access of the blob | -| subscribeToBackplane | boolean, _true_ | | Enable an agent of the backplane client which subscribes to worker channel and operation channel events. If disabled, responsiveness of watchers and CAS are reduced | -| runFailsafeOperation | boolean, _true_ | | Enable an agent in the backplane client which monitors watched operations and ensures they are in a known maintained, or expirable state | -| maxQueueDepth | Integer, _100000_ | | Maximum length that the ready to run queue is allowed to reach to control an arrival flow for execution | -| maxPreQueueDepth | Integer, _1000000_ | | Maximum lengh that the arrival queue is allowed to reach to control load on the Redis cluster | -| priorityQueue | boolean, _false_ | | Priority queue type allows prioritizing operations based on Bazel's --remote_execution_priority= flag | -| timeout | Integer, _10000_ | | Default timeout | -| maxAttempts | Integer, _20_ | | Maximum number of execution attempts | -| cacheCas | boolean, _false_ | | | +| Configuration | Accepted and _Default_ Values | Environment Var | Command Line Argument | Description | +|------------------------------|------------------------------------------|-----------------|-----------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| type | _SHARD_ | | | Type of backplane. Currently, the only implementation is SHARD utilizing Redis | +| redisUri | String, redis://localhost:6379 | REDIS_URI | --redis_uri | Redis cluster endpoint. This must be a single URI. This can embed a username/password per RFC-3986 Section 3.2.1 and this will take precedence over `redisPassword` and `redisPasswordFile`. | +| redisPassword | String, _null_ | | | Redis password, if applicable | +| redisPasswordFile | String, _null_ | | | File to read for a Redis password. 
If specified, this takes precedence over `redisPassword` | +| redisNodes | List of Strings, _null_ | | | List of individual Redis nodes, if applicable | +| jedisPoolMaxTotal | Integer, _4000_ | | | The size of the Redis connection pool | +| workersHashName | String, _Workers_ | | | Redis key used to store a hash of registered workers | +| workerChannel | String, _WorkerChannel_ | | | Redis pubsub channel key where changes of the cluster membership are announced | +| actionCachePrefix | String, _ActionCache_ | | | Redis key prefix for all ActionCache entries | +| actionCacheExpire | Integer, _2419200_ | | | The TTL maintained for ActionCache entries, not refreshed on getActionResult hit | +| actionBlacklistPrefix | String, _ActionBlacklist_ | | | Redis key prefix for all blacklisted actions, which are rejected | +| actionBlacklistExpire | Integer, _3600_ | | | The TTL maintained for action blacklist entries | +| invocationBlacklistPrefix | String, _InvocationBlacklist_ | | | Redis key prefix for blacklisted invocations, suffixed with a tool invocation ID | +| operationPrefix | String, _Operation_ | | | Redis key prefix for all operations, suffixed with the operation's name | +| operationExpire | Integer, _604800_ | | | The TTL maintained for all operations, updated on each modification | +| preQueuedOperationsListName | String, _{Arrival}:PreQueuedOperations_ | | | Redis key used to store a list of ExecuteEntry awaiting transformation into QueryEntry | +| processingListName | String, _{Arrival}:ProcessingOperations_ | | | Redis key of a list used to ensure reliable processing of arrival queue entries with operation watch monitoring | +| processingPrefix | String, _Processing_ | | | Redis key prefix for operations which are being dequeued from the arrival queue | +| processingTimeoutMillis | Integer, _20000_ | | | Delay (in ms) used to populate processing operation entries | +| queuedOperationsListName | String, _{Execution}:QueuedOperations_ | | | Redis key used to store a list of QueueEntry awaiting execution by workers | +| dispatchingPrefix | String, _Dispatching_ | | | Redis key prefix for operations which are being dequeued from the ready to run queue | +| dispatchingTimeoutMillis | Integer, _10000_ | | | Delay (in ms) used to populate dispatching operation entries | +| dispatchedOperationsHashName | String, _DispatchedOperations_ | | | Redis key of a hash of operation names to the worker lease for its execution, which are monitored by the dispatched monitor | +| operationChannelPrefix | String, _OperationChannel_ | | | Redis pubsub channel prefix suffixed by an operation name | +| casPrefix | String, _ContentAddressableStorage_ | | | Redis key prefix suffixed with a blob digest that maps to a set of workers with that blob's availability | +| casExpire | Integer, _604800_ | | | The TTL maintained for CAS entries, which is not refreshed on any read access of the blob | +| subscribeToBackplane | boolean, _true_ | | | Enable an agent of the backplane client which subscribes to worker channel and operation channel events.
If disabled, responsiveness of watchers and CAS is reduced | +| runFailsafeOperation | boolean, _true_ | | | Enable an agent in the backplane client which monitors watched operations and ensures they are in a known maintained, or expirable state | +| maxQueueDepth | Integer, _100000_ | | | Maximum length that the ready to run queue is allowed to reach to control an arrival flow for execution | +| maxPreQueueDepth | Integer, _1000000_ | | | Maximum length that the arrival queue is allowed to reach to control load on the Redis cluster | +| priorityQueue | boolean, _false_ | | | Priority queue type allows prioritizing operations based on Bazel's --remote_execution_priority= flag | +| timeout | Integer, _10000_ | | | Default timeout | +| maxInvocationIdTimeout | Integer, _604800_ | | | Maximum TTL (Time-to-Live in seconds) of invocationId keys in RedisBackplane | +| maxAttempts | Integer, _20_ | | | Maximum number of execution attempts | +| cacheCas | boolean, _false_ | | | | Example: -``` +```yaml backplane: type: SHARD redisUri: "redis://localhost:6379" @@ -222,7 +229,7 @@ backplane: Example: -``` +```yaml backplane: type: SHARD redisUri: "redis://localhost:6379" @@ -238,28 +245,31 @@ backplane: ### Worker -| Configuration | Accepted and _Default_ Values | Environment Var | Description | -|----------------------------------|-------------------------------|-------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| port | Integer, _8981_ | | Listening port of the worker | -| publicName | String, _DERIVED:port_ | INSTANCE_NAME | Host:port of the GRPC server, required to be accessible by all servers | -| root | String, _/tmp/worker_ | | Path for all operation content storage | -| inlineContentLimit | Integer, _1048567_ | | Total size in bytes of inline content for action results, output files, stdout, stderr content | -| operationPollPeriod | Integer, _1_ | | Period between poll operations at any stage | -| executeStageWidth | Integer, _0_ | EXECUTION_STAGE_WIDTH | Number of CPU cores available for execution (0 = system available cores) | -| executeStageWidthOffset | Integer, _0_ | | Offset number of CPU cores available for execution (to allow for use by other processes) | -| inputFetchStageWidth | Integer, _0_ | | Number of concurrently available slots to fetch inputs (0 = system calculated based on CPU cores) | -| inputFetchDeadline | Integer, _60_ | | Limit on time (seconds) for input fetch stage to fetch inputs | -| linkInputDirectories | boolean, _true_ | | Use an input directory creation strategy which creates a single directory tree at the highest level containing no output paths of any kind, and symlinks that directory into an action's execroot, saving large amounts of time spent manufacturing the same read-only input hierirchy over multiple actions' executions | -| execOwner | String, _null_ | | Create exec trees containing directories that are owned by this user | -| hexBucketLevels | Integer, _0_ | | Number of levels to create for directory storage by leading byte of the hash (problematic, not recommended) | -| defaultMaxCores | Integer, _0_ | | Constrain all executions to this logical core count unless otherwise specified via min/max-cores (0 = no limit) | -| limitGlobalExecution | boolean, _false_ | | Constrain all executions to
a pool of logical cores specified in executeStageWidth | -| onlyMulticoreTests | boolean, _false_ | | Only permit tests to exceed the default coresvalue for their min/max-cores range specification (only works with non-zero defaultMaxCores) | -| allowBringYourOwnContainer | boolean, _false_ | | Enable execution in a custom Docker container | -| errorOperationRemainingResources | boolean, _false_ | | | -| realInputDirectories | List of Strings, _external_ | | A list of paths that will not be subject to the effects of linkInputDirectories setting, may also be used to provide writable directories as input roots for actions which expect to be able to write to an input location and will fail if they cannot | - -``` +| Configuration | Accepted and _Default_ Values | Environment Var | Description | +|----------------------------------|-------------------------------|-----------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| port | Integer, _8981_ | | Listening port of the worker | +| publicName | String, _DERIVED:port_ | INSTANCE_NAME | Host:port of the GRPC server, required to be accessible by all servers | +| root | String, _/tmp/worker_ | | Path for all operation content storage | +| inlineContentLimit | Integer, _1048567_ | | Total size in bytes of inline content for action results, output files, stdout, stderr content | +| operationPollPeriod | Integer, _1_ | | Period between poll operations at any stage | +| executeStageWidth | Integer, _0_ | EXECUTION_STAGE_WIDTH | Number of CPU cores available for execution (0 = system available cores) | +| executeStageWidthOffset | Integer, _0_ | | Offset number of CPU cores available for execution (to allow for use by other processes) | +| inputFetchStageWidth | Integer, _0_ | | Number of concurrently available slots to fetch inputs (0 = system calculated based on CPU cores) | +| inputFetchDeadline | Integer, _60_ | | Limit on time (seconds) for input fetch stage to fetch inputs | +| linkInputDirectories | boolean, _true_ | | Use an input directory creation strategy which creates a single directory tree at the highest level containing no output paths of any kind, and symlinks that directory into an action's execroot, saving large amounts of time spent manufacturing the same read-only input hierarchy over multiple actions' executions | +| execOwner | String, _null_ | | Create exec trees containing directories that are owned by this user | +| hexBucketLevels | Integer, _0_ | | Number of levels to create for directory storage by leading byte of the hash (problematic, not recommended) | +| defaultMaxCores | Integer, _0_ | | Constrain all executions to this logical core count unless otherwise specified via min/max-cores (0 = no limit) | +| limitGlobalExecution | boolean, _false_ | | Constrain all executions to a pool of logical cores specified in executeStageWidth | +| onlyMulticoreTests | boolean, _false_ | | Only permit tests to exceed the default cores value for their min/max-cores range specification (only works with non-zero defaultMaxCores) | +| allowBringYourOwnContainer | boolean, _false_ | | Enable execution in a custom Docker container | +| errorOperationRemainingResources | boolean, _false_ | | | +| errorOperationOutputSizeExceeded | boolean, _false_ | | Operations which
produce single output files which exceed maxEntrySizeBytes will fail with a violation type which implies a user error. When disabled, the violation will indicate a transient error, with the action blacklisted. | +| realInputDirectories | List of Strings, _external_ | | A list of paths that will not be subject to the effects of linkInputDirectories setting, may also be used to provide writable directories as input roots for actions which expect to be able to write to an input location and will fail if they cannot | +| gracefulShutdownSeconds | Integer, _0_ | | Time in seconds to allow for operations in flight to finish when a shutdown signal is received | +| createSymlinkOutputs | boolean, _false_ | | Creates SymlinkNodes for symbolic links discovered in output paths for actions. No verification of the symlink target path occurs. Buildstream, for example, requires this. | + +```yaml worker: port: 8981 publicName: "localhost:8981" @@ -276,7 +286,7 @@ worker: Example: -``` +```yaml worker: capabilities: cas: true @@ -285,67 +295,103 @@ worker: ### Sandbox Settings -| Configuration | Accepted and _Default_ Values | Description | -|---------------|-------------------------------|---------------------------------------------------| -| alwaysUse | boolean, _false_ | Enforce that the sandbox be used on every acion. | -| selectForBlockNetwork | boolean, _false_ | `block-network` enables sandbox action execution. | -| selectForTmpFs | boolean, _false_ | `tmpfs` enables sandbox action execution. | +| Configuration | Accepted and _Default_ Values | Description | +|---------------|-------------------------------|------------------------------------------------------| +| alwaysUseSandbox | boolean, _false_ | Enforce that the sandbox be used on every action. | +| alwaysUseCgroups | boolean, _true_ | Enforce that actions run under cgroups. | +| alwaysUseTmpFs | boolean, _false_ | Enforce that the sandbox uses tmpfs on every action. | +| selectForBlockNetwork | boolean, _false_ | `block-network` enables sandbox action execution. | +| selectForTmpFs | boolean, _false_ | `tmpfs` enables sandbox action execution. | Example: -``` +```yaml worker: sandboxSettings: - alwaysUse: true + alwaysUseSandbox: true + alwaysUseCgroups: true + alwaysUseTmpFs: true selectForBlockNetwork: false selectForTmpFs: false ``` +Note: In order for these settings to take effect, you must also configure `limitGlobalExecution: true`. + ### Dequeue Match -| Configuration | Accepted and _Default_ Values | Description | -|------------------|-------------------------------|-------------| -| acceptEverything | boolean, _true_ | | -| allowUnmatched | boolean, _false_ | | +| Configuration | Accepted and _Default_ Values | Description | +|------------------|-------------------------------|------------------------------------------------------------------| +| allowUnmatched | boolean, _false_ | | +| properties | List of name/value pairs | Pairs of provisions available to match against action properties | Example: -``` +```yaml worker: dequeueMatchSettings: - acceptEverything: true allowUnmatched: false + properties: + - "gpu": "nvidia RTX 2090" +``` + +### Resources + +A list of limited resources that are available to the worker to be depleted by actions which execute bearing a `"resource:<name>": "N"` property.
+Note that in order to accept resources from a configured queue, the dequeueMatchSettings must either: + * specify `allowUnmatched: true` + * contain "resource:" in properties, with either a specific limited resource count as the only accepted value for the action property, or "*" + +| Configuration | Accepted Values | Description | +|---------------|-----------------|---------------------------------------| +| name | string | Resource identifier present on worker | +| amount | Integer | Resource count depleted by actions | + +Example: +```yaml +worker: + dequeueMatchSettings: + properties: + - "resource:special-compiler-license": "1" # only actions which request one compiler license at a time will be accepted + resources: + - name: "special-compiler-license" + amount: 3 ``` ### Worker CAS +Unless specified, options are only relevant for the FILESYSTEM type. + | Configuration | Accepted and _Default_ Values | Description | -|------------------------------|-------------------------------|---------------------------------------------------------------------------------------------------------------| -| type | _FILESYSTEM_, GRPC | Type of CAS used | -| path | String, _cache_ | Local cache location relative to the 'root', or absolute | -| maxSizeBytes | Integer, _2147483648_ | Limit for contents of files retained from CAS in the cache | -| fileDirectoriesIndexInMemory | boolean, _false_ | Determines if the file directories bidirectional mapping should be stored in memory or in sqllite | -| skipLoad | boolean, _false_ | Determines if transient data on the worker should be loaded into CAS on worker startup (affects startup time) | -| target | String, _null_ | For GRPC CAS type, target for external CAS endpoint | +|------------------------------|-------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------| +| type | _FILESYSTEM_, GRPC | Type of CAS used | +| path | String, _cache_ | Local cache location relative to the 'root', or absolute | +| maxSizeBytes | Integer, _0_ | Limit for contents of files retained from CAS in the cache, value of 0 means to auto-configure to 90% of _root_/_path_ underlying filesystem space | +| fileDirectoriesIndexInMemory | boolean, _false_ | Determines if the file directories bidirectional mapping should be stored in memory or in sqlite | +| skipLoad | boolean, _false_ | Determines if transient data on the worker should be loaded into CAS on worker startup (affects startup time) | +| target | String, _null_ | For GRPC CAS type, target for external CAS endpoint | Example: -``` +This definition will create a filesystem-based CAS file cache at the path "cache" under the worker root that will reject entries over 2GiB in size, and will expire LRU blobs when the aggregate size of all blobs exceeds 2GiB in order to insert additional entries. + +```yaml worker: - cas: - type: FILESYSTEM - path: "cache" - maxSizeBytes: 2147483648 # 2 * 1024 * 1024 * 1024 - maxEntrySizeBytes: 2147483648 # 2 * 1024 * 1024 * 1024 - target: + storages: + - type: FILESYSTEM + path: "cache" + maxSizeBytes: 2147483648 # 2 * 1024 * 1024 * 1024 + maxEntrySizeBytes: 2147483648 # 2 * 1024 * 1024 * 1024 ``` +This definition elides FILESYSTEM configuration with '...', will read-through an external GRPC CAS supporting the REAPI CAS Services into its storage, and will attempt to write expiring entries into the GRPC CAS (i.e.
pushing new entries into the head of a worker LRU list will drop the entries from the tail into the GRPC CAS). + ``` worker: - cas: - type: GRPC - instanceName: external-cas - target: "cas.external.com:1234" + storages: + - type: FILESYSTEM + ... + - type: GRPC + target: "cas.external.com:1234" ``` ### Execution Policies @@ -357,7 +403,7 @@ worker: Example: -``` +```yaml worker: executionPolicies: - name: test diff --git a/_site/docs/contribute/design-documents.md b/_site/docs/contribute/design-documents.md index f7b1c0b80d..ea608696f3 100644 --- a/_site/docs/contribute/design-documents.md +++ b/_site/docs/contribute/design-documents.md @@ -5,4 +5,5 @@ parent: Contribute nav_order: 2 --- -[Infinite Cache (Storage Workers)](https://docs.google.com/document/d/1IQQbWPzjSluDL25FZ9ADtNIOT90PLijQGIAC4RbwMjY/edit?usp=sharing) \ No newline at end of file +[Infinite Cache (Storage Workers)](https://docs.google.com/document/d/1IQQbWPzjSluDL25FZ9ADtNIOT90PLijQGIAC4RbwMjY/edit?usp=sharing) +[Local and Global Resources](https://docs.google.com/document/d/1u0TkmVmdMS53PWR1hgh-a_cj3NmQYE0Favv9aGFfQZs/edit?usp=sharing) \ No newline at end of file diff --git a/_site/docs/contribute/local_development.md b/_site/docs/contribute/local_development.md index 5799aa9739..156f847ab7 100644 --- a/_site/docs/contribute/local_development.md +++ b/_site/docs/contribute/local_development.md @@ -75,4 +75,4 @@ bazel run //src/main/java/build/buildfarm:buildfarm-shard-worker $PWD/examples/c Now, you should have something like this, and you can now run / debug Buildfarm Server from inside of IntelliJ, just like any other program: -![IntelliJ Buildfarm Server run configuration]]({{site.url}}{{site.baseurl}}/assets/images/intellij-server-run-config.png) +![IntelliJ Buildfarm Server run configuration]({{site.url}}{{site.baseurl}}/assets/images/intellij-server-run-config.png) diff --git a/_site/docs/execution/builds-without-the-bytes.md b/_site/docs/execution/builds-without-the-bytes.md index abda8c86b5..5c49025877 100644 --- a/_site/docs/execution/builds-without-the-bytes.md +++ b/_site/docs/execution/builds-without-the-bytes.md @@ -7,7 +7,7 @@ nav_order: 4 # Builds Without The Bytes -tl;dr: add `--build_request_id=https://host?ENSURE_OUTPUTS_PRESENT=true#$(uuidgen)` to your BWOB bazel invocations. +tl;dr: add `--build_request_id=https://host?ENSURE_OUTPUTS_PRESENT=true#$(uuidgen)` to your BWOB bazel invocations, or enable `ensureOutputsPresent` in your config to set it globally. As proposed in this [issue](https://github.com/bazelbuild/bazel/issues/6862) and the accompanying document, bazel endeavors to provide a mechanism to be 'content-light' for remote execution, using only content reference addresses to request action execution and construct successively dependent action definitions. @@ -17,4 +17,4 @@ This puts BuildFarm in the uncomfortable position of never being able to expire To combat this, you can provide some metadata to buildfarm that will help to limit (but will not remove the possibility of) failed builds. -Bazel presents a 'correlated_invocations_id' on every request to BuildFarm, including the GetActionResult request, which it uses to retrieve cached results. Since ActionResults are the long tail survivor of actions, being retained for much longer after one executes and produces its content, this represents the most likely position where content may have been removed, and a stale reference might be provided.
BuildFarm recognizes this correlated_invocations_id and if it is a URI, can parse its query parameters for behavior control. One such control is ENSURE_OUTPUTS_PRESENT for the GetActionResult request - if this query value is the string "true", BuildFarm will make a silent FindMissingBlobs check for all of the outputs of an ActionResult before responding with it. If any are missing, BuildFarm will instead return code NOT_FOUND, inspiring the client to see a cache miss, and attempt a [remote] execution. \ No newline at end of file +Bazel presents a 'correlated_invocations_id' on every request to BuildFarm, including the GetActionResult request, which it uses to retrieve cached results. Since ActionResults are the long tail survivor of actions, being retained for much longer after one executes and produces its content, this represents the most likely position where content may have been removed, and a stale reference might be provided. BuildFarm recognizes this correlated_invocations_id and if it is a URI, can parse its query parameters for behavior control. One such control is ENSURE_OUTPUTS_PRESENT for the GetActionResult request - if this query value is the string "true", BuildFarm will make a silent FindMissingBlobs check for all of the outputs of an ActionResult before responding with it. If any are missing, BuildFarm will instead return code NOT_FOUND, inspiring the client to see a cache miss, and attempt a [remote] execution. diff --git a/_site/docs/execution/environment.md b/_site/docs/execution/environment.md index 4a17aabe6b..d2cbca0b51 100644 --- a/_site/docs/execution/environment.md +++ b/_site/docs/execution/environment.md @@ -116,7 +116,7 @@ Next we will create a BUILD file to create our target image. We will use the sha ``` load("@io_bazel_rules_docker//container:container.bzl", "container_image") -java_image( +container_image( name = "buildfarm-shard-worker-ubuntu20-java14", base = "@ubuntu20_java14_image_base//image", files = [ diff --git a/_site/docs/execution/execution_policies.md b/_site/docs/execution/execution_policies.md index 19698a9925..f0eaf295d9 100644 --- a/_site/docs/execution/execution_policies.md +++ b/_site/docs/execution/execution_policies.md @@ -17,7 +17,7 @@ This policy type specifies that a worker should prepend a single path, and a num This example will use the buildfarm-provided executable `as-nobody`, which will upon execution demote itself to a `nobody` effective process owner uid, and perform an `execvp(2)` with the remaining provided program arguments, which will subsequently execute as a user that no longer matches the worker process. -``` +```yaml # default wrapper policy application worker: executionPolicies: @@ -50,7 +50,8 @@ These wrappers are used for detecting actions that rely on time. Below is a dem This addresses two problems in regards to an action's dependence on time. The 1st problem is when an action takes longer than it should because it's sleeping unnecessarily. The 2nd problem is when an action relies on time which causes it to eventually be broken on master despite the code not changing. Both problems are expressed below as unit tests. We demonstrate a time-spoofing mechanism (the re-writing of syscalls) which allows us to detect these problems generically over any action. The objective is to analyze builds for performance inefficiency and discover future instabilities before they occur. ### Issue 1 (slow test) -``` + +```bash #!/bin/bash set -euo pipefail @@ -58,16 +59,19 @@ echo -n "testing... 
" sleep 10; echo "done" ``` + The test takes 10 seconds to run on average. -``` -bazel test --runs_per_test=10 --config=remote //cloud/buildfarm:sleep_test + +```shell +$ bazel test --runs_per_test=10 --config=remote //cloud/buildfarm:sleep_test //cloud/buildfarm:sleep_test PASSED in 10.2s Stats over 10 runs: max = 10.2s, min = 10.1s, avg = 10.2s, dev = 0.0s ``` We can check for performance improvements by using the `skip-sleep` option. -``` -bazel test --runs_per_test=10 --config=remote --remote_default_exec_properties='skip-sleep=true' //cloud/buildfarm:sleep_test + +```shell +$ bazel test --runs_per_test=10 --config=remote --remote_default_exec_properties='skip-sleep=true' //cloud/buildfarm:sleep_test //cloud/buildfarm:sleep_test PASSED in 1.0s Stats over 10 runs: max = 1.0s, min = 0.9s, avg = 1.0s, dev = 0.0s ``` @@ -75,7 +79,8 @@ bazel test --runs_per_test=10 --config=remote --remote_default_exec_properties=' Now the test is 10x faster. If skipping sleep makes an action perform significantly faster without affecting its success rate, that would warrant further investigation into the action's implementation. ### Issue 2 (future failing test) -``` + +```bash #!/bin/bash set -euo pipefail @@ -89,12 +94,15 @@ echo "Times change." date exit -1; ``` + The test passes today, but will it pass tomorrow? Will it pass a year from now? We can find out by using the `time-shift` option. -``` -bazel test --test_output=streamed --remote_default_exec_properties='time-shift=31556952' --config=remote //cloud/buildfarm:future_fail + +```shell +$ bazel test --test_output=streamed --remote_default_exec_properties='time-shift=31556952' --config=remote //cloud/buildfarm:future_fail INFO: Found 1 test target... Times change. Mon Sep 25 18:31:09 UTC 2023 //cloud/buildfarm:future_fail FAILED in 18.0s ``` + Time is shifted to the year 2023 and the test now fails. We can fix the problem before others see it. diff --git a/_site/docs/execution/execution_properties.md b/_site/docs/execution/execution_properties.md index 85579ed099..7966c70dd2 100644 --- a/_site/docs/execution/execution_properties.md +++ b/_site/docs/execution/execution_properties.md @@ -76,37 +76,42 @@ Despite being given 1 core, they see all of the cpus and decide to spawn that ma **Standard Example:** This test will succeed when env var TESTVAR is foobar, and fail otherwise. -``` + +```shell #!/bin/bash [ "$TESTVAR" = "foobar" ] ``` -``` -./bazel test \ + +```shell +$ ./bazel test \ --remote_executor=grpc://127.0.0.1:8980 --noremote_accept_cached --nocache_test_results \ //env_test:main FAIL ``` -``` -./bazel test --remote_default_exec_properties='env-vars={"TESTVAR": "foobar"}' \ +```shell +$ ./bazel test --remote_default_exec_properties='env-vars={"TESTVAR": "foobar"}' \ --remote_executor=grpc://127.0.0.1:8980 --noremote_accept_cached --nocache_test_results \ //env_test:main PASS ``` + **Template Example:** If you give a range of cores, buildfarm has the authority to decide how many your operation actually claims. You can let buildfarm resolve this value for you (via [mustache](https://mustache.github.io/)). 
-``` +```bash #!/bin/bash [ "$MKL_NUM_THREADS" = "1" ] ``` -``` -./bazel test \ + +```shell +$ ./bazel test \ --remote_executor=grpc://127.0.0.1:8980 --noremote_accept_cached --nocache_test_results \ //env_test:main FAIL ``` -``` -./bazel test \ + +```shell +$ ./bazel test \ --remote_default_exec_properties='env-vars="MKL_NUM_THREADS": "{{limits.cpu.claimed}}"' \ --remote_executor=grpc://127.0.0.1:8980 --noremote_accept_cached --nocache_test_results \ //env_test:main diff --git a/_site/docs/metrics/metrics.md b/_site/docs/metrics/metrics.md index 5aa96fe56f..7b55ab83bb 100644 --- a/_site/docs/metrics/metrics.md +++ b/_site/docs/metrics/metrics.md @@ -48,69 +48,25 @@ Counter for number of operations that failed to requeue Gauge of the number of dispatched operations -**dispatched_operations_build_amount** -Gauge for the number of dispatched operations that are build actions -**dispatched_operations_test_amount** -Gauge for the number of dispatched operations that are test actions - -**dispatched_operations_unknown_amount** - -Gauge for the number of dispatched operations that could not be identified as build / test - -**dispatched_operations_from_queue_amount** - -Gauge for the number of dispatched operations that came from each queue (using "queue_name" as label) - -**dispatched_operations_tools_amount** - -Gauge for the number of dispatched operations by tool name (using "tool_name" as label) - -**dispatched_operations_mnemonics_amount** - -Gauge for the number of dispatched operations by mnemonic (using "mnemonic" as label) - -**dispatched_operations_command_tools** - -Gauge for the number of dispatched operations by cli tool (using "tool" as label) - -**dispatched_operations_targets_amount** - -Gauge for the number of dispatched operations by target (using "target" as label) - -**dispatched_operations_config_amount** - -Gauge for the number of dispatched operations by config (using "config" as label) - -**dispatched_operations_platform_properties** - -Gauge for the number of dispatched operations by platform properties (using "config" as label) - -**dispatched_operations_clients_being_served** - -The number of build clients currently being served - -**dispatched_operations_requeued_operations_amount** - -The number of dispatched operations that have been requeued **worker_pool_size** Gauge of the number of workers available -**queue_size** +**storage_worker_pool_size** -Gauge of the size of the queue (using a queue_name label for each individual queue) +Gauge of the number of storage workers available -**blocked_actions_size** +**execute_worker_pool_size** -Gauge of the number of blocked actions +Gauge of the number of execute workers available. 
-**blocked_invocations_size** -Gauge of the number of blocked invocations +**queue_size** Gauge of the size of the queue (using a queue_name label for each individual queue) **actions** @@ -118,15 +74,19 @@ Counter for the number of actions processed **operations_stage_load** -Gauge for the number of operations in each stage (using a stage_name for each individual stage) +Counter for the number of operations in each stage (using a `stage_name` label for each individual stage) **operation_status** -Gauge for the completed operations status (using a status_code label for each individual GRPC code) +Counter for the completed operations status (using a `status_code` label for each individual GRPC code) + +**operation_exit_code** + +Counter for the completed operations exit code (using an `exit_code` label for each individual execution exit code) **operation_worker** -Gauge for the number of operations executed on each worker (using a worker_name label for each individual worker) +Counter for the number of operations executed on each worker (using a `worker_name` label for each individual worker) **action_results** @@ -186,7 +146,7 @@ Counter showing service restarts **cas_size** -Total size of the worker's CAS in bytes +Gauge of the total size of the worker's CAS in bytes **cas_ttl_s** @@ -194,7 +154,7 @@ Histogram for amount of time CAS entries live on L1 storage before expiration (s **cas_entry_count** -The total number of entries in the worker's CAS +Gauge of the total number of entries in the worker's CAS Java interceptors can be used to monitor Grpc services using Prometheus. To enable [these metrics](https://github.com/grpc-ecosystem/java-grpc-prometheus), add the following configuration to your server: ``` server: grpcMetrics: enabled: true provideLatencyHistograms: false -``` \ No newline at end of file +``` diff --git a/_site/docs/quick_start.md b/_site/docs/quick_start.md index 2e32dc30ce..8a9a9234db 100644 --- a/_site/docs/quick_start.md +++ b/_site/docs/quick_start.md @@ -6,14 +6,27 @@ nav_order: 3 # Quick Start -Here we describe how to use bazel remote caching or remote execution with buildfarm. We'll start by creating a single workspace that can be used for both. +Here we describe how to use bazel remote caching or remote execution with buildfarm. We will create a single client workspace that can be used for both. + +## Setup + +You can run this quick start on a single computer running any flavor of linux that bazel supports. A C++ compiler is used here to demonstrate action execution. This computer is the localhost for the rest of the description. + +### Backplane + +Buildfarm requires a backplane to store information that is shared between cluster members. A [redis](https://redis.io) server can be used to meet this requirement. + +Download/Install a redis-server instance and run it on your localhost. The default redis port of 6379 will be used by the default buildfarm configs. + +## Workspace Let's start with a bazel workspace with a single file to compile into an executable: Create a new directory for our workspace and add the following files: `main.cc`: -``` + +```c #include <iostream> int main( int argc, char *argv[] ) @@ -23,7 +36,8 @@ int main( int argc, char *argv[] ) ``` `BUILD`: -``` + +```starlark cc_binary( name = "main", srcs = ["main.cc"], @@ -32,7 +46,7 @@ And an empty WORKSPACE file. -As a test, verify that `bazel run :main` builds your main program and runs it, and prints `Hello, World!`.
This will ensure that you have properly installed bazel and a C++ compiler, and have a working target before moving on to remote execution. +As a test, verify that `bazel run :main` builds your main program and runs it, and prints `Hello, World!`. This will ensure that you have properly installed `bazel` and a C++ compiler, and have a working target before moving on to remote caching or remote execution. Download and extract the buildfarm repository. Each command sequence below will have the intended working directory indicated, between the client (workspace running bazel), and buildfarm. @@ -40,25 +54,35 @@ This tutorial assumes that you have a bazel binary in your path and you are in t ## Remote Caching -A Buildfarm server with an instance can be used strictly as an ActionCache and ContentAddressableStorage to improve build performance. This is an example of running a bazel client that will retrieve results if available, and store them if the cache is missed and the execution needs to run locally. +A Buildfarm cluster can be used strictly as an ActionCache (AC) and ContentAddressableStorage (CAS) to improve build performance. This is an example of running a bazel client that will retrieve results if available, otherwise store them on a cache miss after executing locally. Download the buildfarm repository and change into its directory, then: -run `bazelisk run src/main/java/build/buildfarm:buildfarm-server $PWD/examples/config.minimal.yml` + * run `bazel run src/main/java/build/buildfarm:buildfarm-server $PWD/examples/config.minimal.yml` This will wait while the server runs, indicating that it is ready for requests. -From another prompt (i.e. a separate terminal) in your newly created workspace directory from above: +A server alone does not itself store the content of action results. It acts as an endpoint for any number of workers that present storage, so we must also start a single worker. + +From another prompt (i.e. a separate terminal) in the buildfarm repository directory: + + * run `bazel run src/main/java/build/buildfarm:buildfarm-shard-worker -- --prometheus_port=9091 $PWD/examples/config.minimal.yml` -run `bazel clean` -run `bazel run --remote_cache=grpc://localhost:8980 :main` +The `--` option is bazel convention to treat all subsequent arguments as parameters to the running app, like our `--prometheus_port`, instead of interpreting them as options to `run`. +The `--prometheus_port=9091` option allows this worker to run alongside our server, which will have started and logged that it has started a service on port `9090`. You can also turn this option off (after the `--` separator) with `--prometheus_port=0` for either server or worker. +This will also wait while the worker runs, indicating it will be available to store cache content. + +From another prompt in your newly created workspace directory from above: + + * run `bazel clean` + * run `bazel run --remote_cache=grpc://localhost:8980 :main` Why do we clean here? Since we're verifying re-execution and caching, this ensures that we will execute any actions in the `run` step and interact with the remote cache. We should be attempting to retrieve cached results, and then when we miss - since we just started this memory resident server - bazel will upload the results of the execution for later use. There will be no change in the output of this bazel run if everything worked, since bazel does not provide output each time it uploads results.
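Optionally, you can sanity-check that both services are up by polling their Prometheus endpoints - a sketch, assuming the standard `/metrics` path and the ports chosen above:

```shell
$ curl -s http://localhost:9090/metrics | head   # server
$ curl -s http://localhost:9091/metrics | head   # worker
```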
To prove that we have placed something in the action cache, we need to do the following: -run `bazel clean` -run `bazel run --remote_cache=localhost:8980 :main` + * run `bazel clean` + * run `bazel run --remote_cache=localhost:8980 :main` This should now print statistics on the `processes` line that indicate that you've retrieved results from the cache for your actions: @@ -68,20 +92,22 @@ INFO: 2 processes: 2 remote cache hit. ## Remote Execution (and caching) -Now we will use buildfarm for remote execution with a minimal configuration - a single memory instance, with a host-colocated worker that can execute a single process at a time - via a bazel invocation on our workspace. +Now we will use buildfarm for remote execution with a minimal configuration with a worker on the localhost that can execute a single process at a time, via a bazel invocation on our workspace. + +First, to clean out the results from the previous cached actions, flush your local redis database: -First, we should restart the buildfarm server to ensure that we get remote execution (this can also be forced from the client by using `--noremote_accept_cached`). From the buildfarm server prompt and directory: + * run `redis-cli flushdb` -interrupt a running `buildfarm-server` -run `bazelisk run src/main/java/build/buildfarm:buildfarm-server $PWD/examples/config.minimal.yml` +Next, we should restart the buildfarm server, and delete the worker's cas storage to ensure that we get remote execution (this can also be forced from the client by using `--noremote_accept_cached`). From the buildfarm server prompt and directory: -From another prompt in the buildfarm repository directory: + * interrupt the running `buildfarm-server` (i.e. Ctrl-C) + * run `bazel run src/main/java/build/buildfarm:buildfarm-server $PWD/examples/config.minimal.yml` -run `bazelisk run src/main/java/build/buildfarm:buildfarm-shard-worker $PWD/examples/config.minimal.yml` +You can leave the worker running from the Remote Caching step; it will not require a restart. From another prompt, in your client workspace: -run `bazel run --remote_executor=grpc://localhost:8980 :main` + * run `bazel run --remote_executor=grpc://localhost:8980 :main` Your build should now print out the following on its `processes` summary line: @@ -94,23 +120,30 @@ That `2 remote` indicates that your compile and link ran remotely. Congratulations! ## Container Quick Start To bring up a minimal buildfarm cluster, you can run: + +```shell +$ ./examples/bf-run start ``` -./examples/bf-run start -``` + This will start all of the necessary containers at the latest version. Once the containers are up, you can build with `bazel run --remote_executor=grpc://localhost:8980 :main`. To stop the containers, run: + +```shell +$ ./examples/bf-run stop ``` -./examples/bf-run stop -``` + +## Next Steps + +We've started our worker on the same host as our server, and also the same host on which we built with bazel, but these services can be spread across many machines, per 'remote'. A typical deployment pairs a large number of workers with a relatively small number of servers (10:1 and 100:1 ratios have been used in practice), consolidating large disks and beefy multicore cpus/gpus on the workers - specializing what work they perform for bazel builds (or other client work) - and giving the servers hefty network connections to funnel content traffic.
A buildfarm deployment can service hundreds or thousands of developers or CI processes, enabling them to benefit from each other's shared context in the AC/CAS, and the pooled execution of a fleet of worker hosts eager to consume operations and deliver results. ## Buildfarm Manager -You can now easily launch a new Buildfarm cluster locally or in AWS using an open sourced Buildfarm Manager. +You can now easily launch a new Buildfarm cluster locally or in AWS using an open sourced [Buildfarm Manager](https://github.com/80degreeswest/bfmgr). -``` -wget https://github.com/80degreeswest/bfmgr/releases/download/1.0.7/bfmgr-1.0.7.jar -java -jar bfmgr-1.0.7.jar -Navigate to http://localhost +```shell +$ wget https://github.com/80degreeswest/bfmgr/releases/download/1.0.7/bfmgr-1.0.7.jar +$ java -jar bfmgr-1.0.7.jar +$ open http://localhost ``` diff --git a/_site/docs/security/security.md b/_site/docs/security/security.md deleted file mode 100644 index a476000546..0000000000 --- a/_site/docs/security/security.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -layout: default -title: Security -has_children: false -nav_order: 6 --- - -# Auditing Buildfarm Artifacts -The complexities of identifying and tracking open-source software (OSS) to comply with license requirements adds friction to the development and integration process. We solve this problem for buildfarm artifacts by creating an accurate "bill of materials" (BOM) containing OSS and third-party packages used to create our deployment. - -To audit buildfarm artifacts run the following: -``` -bazel build :buildfarm-server-audit :buildfarm-shard-worker-audit -``` - -To see the BOM file: -``` -cat bazel-bin/buildfarm-server.bom.yaml -``` - -The BOM file contains library names with corresponding license information. This currently only works for maven dependencies. diff --git a/_site/docs/tools/troubleshooting-bazel-remote-execution.md b/_site/docs/tools/troubleshooting-bazel-remote-execution.md index 3c18e9432c..0d8661561e 100644 --- a/_site/docs/tools/troubleshooting-bazel-remote-execution.md +++ b/_site/docs/tools/troubleshooting-bazel-remote-execution.md @@ -11,7 +11,7 @@ A typical use case: Something works locally, but breaks when remote execution is ## bazel logging -Use `bazel [build|run|test] --experimental_remote_grpc_log=` to produce a binary log of all of the grpc activity bazel performs during an invocation. This log is written to at the completion of each request, and may not contain a complete picture if a build is interrupted, or a request is currently ongoing. +Use `bazel [build|run|test] --remote_grpc_log=` (`--experimental_remote_grpc_log=` if you are using bazel older than the 6.0 release) to produce a binary log of all of the grpc activity bazel performs during an invocation. This log is written to at the completion of each request, and may not contain a complete picture if a build is interrupted, or a request is currently ongoing.
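For example, capturing a log during a remote build might look like this sketch (the log path is illustrative):

```shell
$ bazel build \
    --remote_executor=grpc://localhost:8980 \
    --remote_grpc_log=/tmp/grpc.log \
    //...
```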
## Dumping the log diff --git a/admin/main/mvn/wrapper/maven-wrapper.properties b/admin/main/mvn/wrapper/maven-wrapper.properties index cd0d451ccd..3c1106457a 100644 --- a/admin/main/mvn/wrapper/maven-wrapper.properties +++ b/admin/main/mvn/wrapper/maven-wrapper.properties @@ -1 +1 @@ -distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.0/apache-maven-3.6.0-bin.zip +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.9.6/apache-maven-3.9.6-bin.zip diff --git a/admin/main/pom.xml b/admin/main/pom.xml index 006217ccb4..8e128ee64c 100644 --- a/admin/main/pom.xml +++ b/admin/main/pom.xml @@ -94,7 +94,7 @@ org.json json - 20230227 + 20231013 org.projectlombok diff --git a/admin/main/src/main/resources/proto/buildfarm.proto b/admin/main/src/main/resources/proto/buildfarm.proto index f99feb3ac9..4349babc16 100644 --- a/admin/main/src/main/resources/proto/buildfarm.proto +++ b/admin/main/src/main/resources/proto/buildfarm.proto @@ -532,7 +532,7 @@ message RedisShardBackplaneConfig { int32 max_attempts = 33; } -message ShardInstanceConfig { +message ServerInstanceConfig { bool run_dispatched_monitor = 1; int32 dispatched_monitor_interval_seconds = 2; @@ -556,7 +556,7 @@ message ShardInstanceConfig { google.protobuf.Duration grpc_timeout = 8; } -message ShardWorkerInstanceConfig { +message WorkerInstanceConfig { // whether to stream stdout from processes bool stream_stdout = 6; @@ -580,7 +580,7 @@ message ShardWorkerInstanceConfig { } message ShardWorkerConfig { - ShardWorkerInstanceConfig shard_worker_instance_config = 1; + WorkerInstanceConfig shard_worker_instance_config = 1; int32 port = 2; @@ -850,7 +850,7 @@ message InstanceConfig { oneof type { MemoryInstanceConfig memory_instance_config = 3; - ShardInstanceConfig shard_instance_config = 4; + ServerInstanceConfig shard_instance_config = 4; } } diff --git a/bazel b/bazel deleted file mode 100755 index d3aef19312..0000000000 Binary files a/bazel and /dev/null differ diff --git a/ci/base-worker-image/jammy/Dockerfile b/ci/base-worker-image/jammy/Dockerfile new file mode 100644 index 0000000000..b2cb2fed9a --- /dev/null +++ b/ci/base-worker-image/jammy/Dockerfile @@ -0,0 +1,6 @@ +# A minimal container for building a base worker image. +# Buildfarm public releases are built using this image as a starting point. +FROM ubuntu:22.04 + +RUN apt-get update +RUN apt-get -y install default-jre default-jdk build-essential libfuse2 cgroup-tools diff --git a/ci/base-worker-image/mantic/Dockerfile b/ci/base-worker-image/mantic/Dockerfile new file mode 100644 index 0000000000..ed7c200317 --- /dev/null +++ b/ci/base-worker-image/mantic/Dockerfile @@ -0,0 +1,6 @@ +# A minimal container for building a base worker image. +# Buildfarm public releases are built using this image as a starting point. +FROM ubuntu:23.04 + +RUN apt-get update +RUN apt-get -y install default-jre default-jdk build-essential libfuse2 cgroup-tools diff --git a/container/BUILD b/container/BUILD new file mode 100644 index 0000000000..52e577c30a --- /dev/null +++ b/container/BUILD @@ -0,0 +1 @@ +# Intentionally Empty diff --git a/container/defs.bzl b/container/defs.bzl new file mode 100644 index 0000000000..ba7cbddeb7 --- /dev/null +++ b/container/defs.bzl @@ -0,0 +1,28 @@ +"""Rules for ENV""" + +def _oci_image_env_impl(ctx): + """ + Helper method to write out "key=value" pairs, one per line. The resulting file is fed into oci_image() via the `env` kwarg.
+ """ + envs = { + "CONFIG_PATH": ctx.attr.configpath, + "JAVA_TOOL_OPTIONS": " ".join(ctx.attr.jvm_args), + } + builder = ctx.actions.declare_file("_%s.env.txt" % ctx.label.name) + ctx.actions.write( + output = builder, + content = "\n".join(["{}={}".format(key, value) for (key, value) in envs.items()]), + ) + return [ + DefaultInfo( + files = depset([builder]), + ), + ] + +oci_image_env = rule( + implementation = _oci_image_env_impl, + attrs = { + "configpath": attr.string(mandatory = True), + "jvm_args": attr.string_list(mandatory = True, allow_empty = False), + }, +) diff --git a/container/test/BUILD b/container/test/BUILD new file mode 100644 index 0000000000..d0f760b4bc --- /dev/null +++ b/container/test/BUILD @@ -0,0 +1,32 @@ +"""Tests for our OCI image outputs""" + +load("@container_structure_test//:defs.bzl", "container_structure_test") + +DRIVER = "docker" # Use tar if your host is not amd64, but it's a lot slower. (for example, you use hardware from Apple and you have aarch64) + +container_structure_test( + name = "worker_test", + configs = [ + # keep sorted + "example_config.yaml", + "telemetry_tools.yaml", + "worker.yaml", + "worker_wrappers.yaml", + ], + driver = DRIVER, + image = "//:buildfarm-worker_linux_amd64", + tags = ["container"], +) + +container_structure_test( + name = "server_test", + configs = [ + # keep sorted + "example_config.yaml", + "server.yaml", + "telemetry_tools.yaml", + ], + driver = DRIVER, + image = "//:buildfarm-server_linux_amd64", + tags = ["container"], +) diff --git a/container/test/example_config.yaml b/container/test/example_config.yaml new file mode 100644 index 0000000000..eb875ae66a --- /dev/null +++ b/container/test/example_config.yaml @@ -0,0 +1,15 @@ +--- +schemaVersion: "2.0.0" + +fileExistenceTests: + - name: "config.minimal.yaml" + path: "/app/build_buildfarm/config.minimal.yml" + shouldExist: true + - name: "logging.properties" + path: "/app/build_buildfarm/src/main/java/build/buildfarm/logging.properties" + shouldExist: true + +metadataTest: + envVars: + - key: CONFIG_PATH + value: /app/build_buildfarm/config.minimal.yml diff --git a/container/test/server.yaml b/container/test/server.yaml new file mode 100644 index 0000000000..40f64ed354 --- /dev/null +++ b/container/test/server.yaml @@ -0,0 +1,16 @@ +--- +schemaVersion: "2.0.0" + +fileExistenceTests: + - name: "JAR" + path: "/app/build_buildfarm/buildfarm-server_deploy.jar" + shouldExist: true + +metadataTest: + envVars: + - key: JAVA_TOOL_OPTIONS + value: "UseContainerSupport" + isRegex: true + labels: + - key: "org.opencontainers.image.source" + value: "https://github.com/bazelbuild/bazel-buildfarm" diff --git a/container/test/telemetry_tools.yaml b/container/test/telemetry_tools.yaml new file mode 100644 index 0000000000..828067f571 --- /dev/null +++ b/container/test/telemetry_tools.yaml @@ -0,0 +1,7 @@ +--- +schemaVersion: "2.0.0" + +fileExistenceTests: + - name: "opentelemetry-javaagent" + path: "/app/build_buildfarm/opentelemetry-javaagent.jar" + shouldExist: true diff --git a/container/test/worker.yaml b/container/test/worker.yaml new file mode 100644 index 0000000000..60caad9900 --- /dev/null +++ b/container/test/worker.yaml @@ -0,0 +1,21 @@ +--- +schemaVersion: "2.0.0" + +fileExistenceTests: + - name: "JAR" + path: "/app/build_buildfarm/buildfarm-shard-worker_deploy.jar" + shouldExist: true + + - name: "tini" + path: "/tini" + shouldExist: true + permissions: "-r-xr-xr-x" + +metadataTest: + envVars: + - key: JAVA_TOOL_OPTIONS + value: "UseContainerSupport" + isRegex: true + 
labels: + - key: "org.opencontainers.image.source" + value: "https://github.com/bazelbuild/bazel-buildfarm" diff --git a/container/test/worker_wrappers.yaml b/container/test/worker_wrappers.yaml new file mode 100644 index 0000000000..36eac05a84 --- /dev/null +++ b/container/test/worker_wrappers.yaml @@ -0,0 +1,50 @@ +--- +schemaVersion: "2.0.0" +# These align with the default paths in //src/main/java/build/buildfarm/common/config/ExecutionWrappers.java +fileExistenceTests: + - name: "cgroups cexec" + # This is installed by the OS package manager. + path: "/usr/bin/cgexec" + shouldExist: true + permissions: "-rwxr-xr-x" + + - name: "unshare" + # This is installed by the OS package manager. + path: "/usr/bin/unshare" + shouldExist: true + permissions: "-rwxr-xr-x" + + - name: "as-nobody wrapper" + path: "/app/build_buildfarm/as-nobody" + shouldExist: true + permissions: "-r-xr-xr-x" + + - name: "process-wrapper" + path: "/app/build_buildfarm/process-wrapper" + shouldExist: true + permissions: "-r-xr-xr-x" + + - name: "skip-sleep" + path: "/app/build_buildfarm/skip_sleep" + shouldExist: true + permissions: "-r-xr-xr-x" + + - name: "skip-sleep-preload" + path: "/app/build_buildfarm/skip_sleep_preload.so" + shouldExist: true + permissions: "-r--r--r--" + + - name: "delay.sh" + path: "/app/build_buildfarm/delay.sh" + shouldExist: true + permissions: "-r-xr-xr-x" + #----- + # These are documented in //:execution_wrappers + - name: "linux-sandbox" + path: "/app/build_buildfarm/linux-sandbox" + shouldExist: true + permissions: "-r-xr-xr-x" + - name: "macos-wrapper" + path: "/app/build_buildfarm/macos-wrapper.sh" + shouldExist: true + permissions: "-r-xr-xr-x" diff --git a/defs.bzl b/defs.bzl deleted file mode 100644 index 08f9a5c561..0000000000 --- a/defs.bzl +++ /dev/null @@ -1,170 +0,0 @@ -""" -buildfarm definitions that can be imported into other WORKSPACE files -""" - -load("@rules_jvm_external//:defs.bzl", "maven_install") -load("@remote_apis//:repository_rules.bzl", "switched_rules_by_language") -load( - "@io_bazel_rules_docker//repositories:repositories.bzl", - container_repositories = "repositories", -) -load("@io_grpc_grpc_java//:repositories.bzl", "grpc_java_repositories") -load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps") -load("@com_grail_bazel_toolchain//toolchain:rules.bzl", "llvm_toolchain") -load("@io_bazel_rules_k8s//k8s:k8s.bzl", "k8s_repositories") -load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies") - -IO_NETTY_MODULES = [ - "buffer", - "codec", - "codec-http", - "codec-http2", - "codec-socks", - "common", - "handler", - "handler-proxy", - "resolver", - "transport", - "transport-native-epoll", - "transport-native-kqueue", - "transport-native-unix-common", -] - -IO_GRPC_MODULES = [ - "api", - "auth", - "core", - "context", - "netty", - "stub", - "protobuf", - "testing", - "services", - "netty-shaded", -] - -COM_AWS_MODULES = [ - "autoscaling", - "core", - "ec2", - "secretsmanager", - "sns", - "ssm", - "s3", -] - -ORG_SPRING_MODULES = [ - "spring-beans", - "spring-core", - "spring-context", - "spring-web", -] - -ORG_SPRING_BOOT_MODULES = [ - "spring-boot-autoconfigure", - "spring-boot", - "spring-boot-starter-web", - "spring-boot-starter-thymeleaf", -] - -def buildfarm_init(name = "buildfarm"): - """ - Initialize the WORKSPACE for buildfarm-related targets - - Args: - name: the name of the repository - """ - maven_install( - artifacts = ["com.amazonaws:aws-java-sdk-%s:1.11.729" % module for module in COM_AWS_MODULES] + - [ - 
"com.fasterxml.jackson.core:jackson-databind:2.15.0", - "com.github.ben-manes.caffeine:caffeine:2.9.0", - "com.github.docker-java:docker-java:3.2.11", - "com.github.jnr:jffi:1.2.16", - "com.github.jnr:jffi:jar:native:1.2.16", - "com.github.jnr:jnr-constants:0.9.9", - "com.github.jnr:jnr-ffi:2.1.7", - "com.github.jnr:jnr-posix:3.0.53", - "com.github.pcj:google-options:1.0.0", - "com.github.serceman:jnr-fuse:0.5.5", - "com.github.luben:zstd-jni:1.5.5-7", - "com.github.oshi:oshi-core:6.4.0", - "com.google.auth:google-auth-library-credentials:0.9.1", - "com.google.auth:google-auth-library-oauth2-http:0.9.1", - "com.google.code.findbugs:jsr305:3.0.1", - "com.google.code.gson:gson:2.9.0", - "com.google.errorprone:error_prone_annotations:2.9.0", - "com.google.errorprone:error_prone_core:0.92", - "com.google.guava:failureaccess:1.0.1", - "com.google.guava:guava:31.1-jre", - "com.google.j2objc:j2objc-annotations:1.1", - "com.google.jimfs:jimfs:1.1", - "com.google.protobuf:protobuf-java-util:3.10.0", - "com.google.protobuf:protobuf-java:3.10.0", - "com.google.truth:truth:0.44", - "org.slf4j:slf4j-simple:1.7.35", - "com.googlecode.json-simple:json-simple:1.1.1", - "com.jayway.jsonpath:json-path:2.4.0", - "io.github.lognet:grpc-spring-boot-starter:4.5.4", - "org.bouncycastle:bcprov-jdk15on:1.70", - "net.jcip:jcip-annotations:1.0", - ] + ["io.netty:netty-%s:4.1.90.Final" % module for module in IO_NETTY_MODULES] + - ["io.grpc:grpc-%s:1.53.0" % module for module in IO_GRPC_MODULES] + - [ - "io.prometheus:simpleclient:0.10.0", - "io.prometheus:simpleclient_hotspot:0.10.0", - "io.prometheus:simpleclient_httpserver:0.10.0", - "junit:junit:4.13.1", - "javax.annotation:javax.annotation-api:1.3.2", - "net.javacrumbs.future-converter:future-converter-java8-guava:1.2.0", - "org.apache.commons:commons-compress:1.21", - "org.apache.commons:commons-pool2:2.9.0", - "org.apache.commons:commons-lang3:3.12.0", - "commons-io:commons-io:2.11.0", - "me.dinowernli:java-grpc-prometheus:0.5.0", - "org.apache.tomcat:annotations-api:6.0.53", - "org.checkerframework:checker-qual:2.5.2", - "org.mockito:mockito-core:2.25.0", - "org.openjdk.jmh:jmh-core:1.23", - "org.openjdk.jmh:jmh-generator-annprocess:1.23", - "org.redisson:redisson:3.13.1", - ] + ["org.springframework.boot:%s:2.7.4" % module for module in ORG_SPRING_BOOT_MODULES] + - ["org.springframework:%s:5.3.23" % module for module in ORG_SPRING_MODULES] + - [ - "org.threeten:threetenbp:1.3.3", - "org.xerial:sqlite-jdbc:3.34.0", - "org.jetbrains:annotations:16.0.2", - "org.yaml:snakeyaml:2.0", - "org.projectlombok:lombok:1.18.24", - ], - generate_compat_repositories = True, - repositories = [ - "https://repo.maven.apache.org/maven2", - "https://jcenter.bintray.com", - ], - ) - - switched_rules_by_language( - name = "bazel_remote_apis_imports", - java = True, - ) - - container_repositories() - - protobuf_deps() - - grpc_java_repositories() - - k8s_repositories() - - rules_pkg_dependencies() - - native.bind( - name = "jar/redis/clients/jedis", - actual = "@jedis//jar", - ) - - llvm_toolchain( - name = "llvm_toolchain", - llvm_version = "16.0.0", - ) diff --git a/deps.bzl b/deps.bzl deleted file mode 100644 index 5f5f0073b2..0000000000 --- a/deps.bzl +++ /dev/null @@ -1,201 +0,0 @@ -""" -buildfarm dependencies that can be imported into other WORKSPACE files -""" - -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file", "http_jar") -load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") - -RULES_JVM_EXTERNAL_TAG = "4.2" -RULES_JVM_EXTERNAL_SHA 
= "cd1a77b7b02e8e008439ca76fd34f5b07aecb8c752961f9640dea15e9e5ba1ca" - -def archive_dependencies(third_party): - return [ - { - "name": "platforms", - "urls": [ - "https://mirror.bazel.build/github.com/bazelbuild/platforms/releases/download/0.0.6/platforms-0.0.6.tar.gz", - "https://github.com/bazelbuild/platforms/releases/download/0.0.6/platforms-0.0.6.tar.gz", - ], - "sha256": "5308fc1d8865406a49427ba24a9ab53087f17f5266a7aabbfc28823f3916e1ca", - }, - { - "name": "rules_jvm_external", - "strip_prefix": "rules_jvm_external-%s" % RULES_JVM_EXTERNAL_TAG, - "sha256": RULES_JVM_EXTERNAL_SHA, - "url": "https://github.com/bazelbuild/rules_jvm_external/archive/%s.zip" % RULES_JVM_EXTERNAL_TAG, - }, - { - "name": "rules_pkg", - "sha256": "8a298e832762eda1830597d64fe7db58178aa84cd5926d76d5b744d6558941c2", - "url": "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.7.0/rules_pkg-0.7.0.tar.gz", - }, - - # Kubernetes rules. Useful for local development with tilt. - { - "name": "io_bazel_rules_k8s", - "strip_prefix": "rules_k8s-0.7", - "url": "https://github.com/bazelbuild/rules_k8s/archive/refs/tags/v0.7.tar.gz", - "sha256": "ce5b9bc0926681e2e7f2147b49096f143e6cbc783e71bc1d4f36ca76b00e6f4a", - }, - - # Needed for "well-known protos" and @com_google_protobuf//:protoc. - { - "name": "com_google_protobuf", - "sha256": "dd513a79c7d7e45cbaeaf7655289f78fd6b806e52dbbd7018ef4e3cf5cff697a", - "strip_prefix": "protobuf-3.15.8", - "urls": ["https://github.com/protocolbuffers/protobuf/archive/v3.15.8.zip"], - }, - { - "name": "com_github_bazelbuild_buildtools", - "sha256": "a02ba93b96a8151b5d8d3466580f6c1f7e77212c4eb181cba53eb2cae7752a23", - "strip_prefix": "buildtools-3.5.0", - "urls": ["https://github.com/bazelbuild/buildtools/archive/3.5.0.tar.gz"], - }, - - # Needed for @grpc_java//compiler:grpc_java_plugin. - { - "name": "io_grpc_grpc_java", - "sha256": "78bf175f9a8fa23cda724bbef52ad9d0d555cdd1122bcb06484b91174f931239", - "strip_prefix": "grpc-java-1.54.1", - "urls": ["https://github.com/grpc/grpc-java/archive/v1.54.1.zip"], - }, - { - "name": "rules_pkg", - "sha256": "335632735e625d408870ec3e361e192e99ef7462315caa887417f4d88c4c8fb8", - "urls": [ - "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.9.0/rules_pkg-0.9.0.tar.gz", - "https://github.com/bazelbuild/rules_pkg/releases/download/0.9.0/rules_pkg-0.9.0.tar.gz", - ], - }, - { - "name": "rules_license", - "sha256": "6157e1e68378532d0241ecd15d3c45f6e5cfd98fc10846045509fb2a7cc9e381", - "urls": [ - "https://github.com/bazelbuild/rules_license/releases/download/0.0.4/rules_license-0.0.4.tar.gz", - "https://mirror.bazel.build/github.com/bazelbuild/rules_license/releases/download/0.0.4/rules_license-0.0.4.tar.gz", - ], - }, - - # The APIs that we implement. 
- { - "name": "googleapis", - "build_file": "%s:BUILD.googleapis" % third_party, - "patch_cmds": ["find google -name 'BUILD.bazel' -type f -delete"], - "patch_cmds_win": ["Remove-Item google -Recurse -Include *.bazel"], - "sha256": "745cb3c2e538e33a07e2e467a15228ccbecadc1337239f6740d57a74d9cdef81", - "strip_prefix": "googleapis-6598bb829c9e9a534be674649ffd1b4671a821f9", - "url": "https://github.com/googleapis/googleapis/archive/6598bb829c9e9a534be674649ffd1b4671a821f9.zip", - }, - { - "name": "remote_apis", - "build_file": "%s:BUILD.remote_apis" % third_party, - "patch_args": ["-p1"], - "patches": ["%s/remote-apis:remote-apis.patch" % third_party], - "sha256": "743d2d5b5504029f3f825beb869ce0ec2330b647b3ee465a4f39ca82df83f8bf", - "strip_prefix": "remote-apis-636121a32fa7b9114311374e4786597d8e7a69f3", - "url": "https://github.com/bazelbuild/remote-apis/archive/636121a32fa7b9114311374e4786597d8e7a69f3.zip", - }, - { - "name": "rules_cc", - "sha256": "3d9e271e2876ba42e114c9b9bc51454e379cbf0ec9ef9d40e2ae4cec61a31b40", - "strip_prefix": "rules_cc-0.0.6", - "url": "https://github.com/bazelbuild/rules_cc/releases/download/0.0.6/rules_cc-0.0.6.tar.gz", - }, - - # Used to format proto files - { - "name": "com_grail_bazel_toolchain", - "sha256": "b2d168315dd0785f170b2b306b86e577c36e812b8f8b05568f9403141f2c24dd", - "strip_prefix": "toolchains_llvm-0.9", - "url": "https://github.com/grailbio/bazel-toolchain/archive/refs/tags/0.9.tar.gz", - "patch_args": ["-p1"], - "patches": ["%s:clang_toolchain.patch" % third_party], - }, - { - "name": "io_bazel_rules_docker", - "sha256": "b1e80761a8a8243d03ebca8845e9cc1ba6c82ce7c5179ce2b295cd36f7e394bf", - "urls": ["https://github.com/bazelbuild/rules_docker/releases/download/v0.25.0/rules_docker-v0.25.0.tar.gz"], - }, - - # Bazel is referenced as a dependency so that buildfarm can access the linux-sandbox as a potential execution wrapper. 
- { - "name": "bazel", - "sha256": "06d3dbcba2286d45fc6479a87ccc649055821fc6da0c3c6801e73da780068397", - "strip_prefix": "bazel-6.0.0", - "urls": ["https://github.com/bazelbuild/bazel/archive/refs/tags/6.0.0.tar.gz"], - "patch_args": ["-p1"], - "patches": ["%s/bazel:bazel_visibility.patch" % third_party], - }, - { - "name": "blake3", - "sha256": "bb529ba133c0256df49139bd403c17835edbf60d2ecd6463549c6a5fe279364d", - "build_file": "%s:BUILD.blake3" % third_party, - "url": "https://github.com/BLAKE3-team/BLAKE3/archive/refs/tags/1.3.3.zip", - "strip_prefix": "BLAKE3-1.3.3", - }, - - # Optional execution wrappers - { - "name": "skip_sleep", - "build_file": "%s:BUILD.skip_sleep" % third_party, - "sha256": "03980702e8e9b757df68aa26493ca4e8573770f15dd8a6684de728b9cb8549f1", - "strip_prefix": "TARDIS-f54fa4743e67763bb1ad77039b3d15be64e2e564", - "url": "https://github.com/Unilang/TARDIS/archive/f54fa4743e67763bb1ad77039b3d15be64e2e564.zip", - }, - { - "name": "rules_oss_audit", - "sha256": "02962810bcf82d0c66f929ccc163423f53773b8b154574ca956345523243e70d", - "strip_prefix": "rules_oss_audit-1b2690cefd5a960c181e0d89bf3c076294a0e6f4", - "url": "https://github.com/vmware/rules_oss_audit/archive/1b2690cefd5a960c181e0d89bf3c076294a0e6f4.zip", - }, - ] - -def buildfarm_dependencies(repository_name = "build_buildfarm"): - """ - Define all 3rd party archive rules for buildfarm - - Args: - repository_name: the name of the repository - """ - third_party = "@%s//third_party" % repository_name - for dependency in archive_dependencies(third_party): - params = {} - params.update(**dependency) - name = params.pop("name") - maybe(http_archive, name, **params) - - # Enhanced jedis 3.2.0 containing several convenience, performance, and - # robustness changes. - # Notable features include: - # Cluster request pipelining, used for batching requests for operation - # monitors and CAS index. - # Blocking request (b* prefix) interruptibility, using client - # connection reset. - # Singleton-redis-as-cluster - support treating a non-clustered redis - # endpoint as a cluster of 1 node. - # Other changes are redis version-forward treatment of spop and visibility - # into errors in cluster unreachable and cluster retry exhaustion. 
- # Details at https://github.com/werkt/jedis/releases/tag/3.2.0-594c20da20 - maybe( - http_jar, - "jedis", - sha256 = "72c749c02b775c0371cfc8ebcf713032910b7c6f365d958c3c000838f43f6a65", - urls = [ - "https://github.com/werkt/jedis/releases/download/3.2.0-594c20da20/jedis-3.2.0-594c20da20.jar", - ], - ) - - maybe( - http_jar, - "opentelemetry", - sha256 = "0523287984978c091be0d22a5c61f0bce8267eeafbbae58c98abaf99c9396832", - urls = [ - "https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/download/v1.11.0/opentelemetry-javaagent.jar", - ], - ) - - http_file( - name = "tini", - sha256 = "12d20136605531b09a2c2dac02ccee85e1b874eb322ef6baf7561cd93f93c855", - urls = ["https://github.com/krallin/tini/releases/download/v0.18.0/tini"], - ) diff --git a/examples/bf-run b/examples/bf-run index b74b020f6f..ffea9faf42 100755 --- a/examples/bf-run +++ b/examples/bf-run @@ -3,7 +3,7 @@ set -e REDIS_NAME="buildfarm-redis" -REDIS_IMAGE="redis:5.0.9" +REDIS_IMAGE="redis:7.2.4" SERVER_NAME="buildfarm-server" SERVER_IMAGE="bazelbuild/buildfarm-server:latest" diff --git a/examples/config.yml b/examples/config.yml index bdd92e6697..4e66e00bd8 100644 --- a/examples/config.yml +++ b/examples/config.yml @@ -3,14 +3,17 @@ defaultActionTimeout: 600 maximumActionTimeout: 3600 maxEntrySizeBytes: 2147483648 # 2 * 1024 * 1024 * 1024 prometheusPort: 9090 +allowSymlinkTargetAbsolute: false server: instanceType: SHARD name: shard actionCacheReadOnly: false port: 8980 grpcMetrics: - enabled: false - provideLatencyHistograms: false + enabled: true + provideLatencyHistograms: true + latencyBuckets: [0.001, 0.01, 0.1, 1, 5, 10, 20, 40, 60, +Infinity] + labelsToReport: [] maxInboundMessageSizeBytes: 0 maxInboundMetadataSize: 0 casWriteTimeout: 3600 @@ -21,6 +24,7 @@ server: dispatchedMonitorIntervalSeconds: 1 runOperationQueuer: true ensureOutputsPresent: false + runFailsafeOperation: true maxCpu: 0 maxRequeueAttempts: 5 useDenyList: true @@ -29,6 +33,7 @@ server: recordBesEvents: false clusterId: local cloudRegion: us-east-1 + gracefulShutdownSeconds: 0 caches: directoryCacheMaxEntries: 10000 commandCacheMaxEntries: 10000 @@ -37,7 +42,6 @@ server: admin: deploymentEnvironment: AWS clusterEndpoint: "grpc://localhost" - enableGracefulShutdown: false metrics: publisher: LOG logLevel: FINEST @@ -70,13 +74,12 @@ backplane: operationChannelPrefix: "OperationChannel" casPrefix: "ContentAddressableStorage" casExpire: 604800 # 1 week - subscribeToBackplane: true - runFailsafeOperation: true maxQueueDepth: 100000 maxPreQueueDepth: 1000000 priorityQueue: false priorityPollIntervalMillis: 100 timeout: 10000 + maxInvocationIdTimeout: 604800 maxAttempts: 20 cacheCas: false queues: @@ -93,6 +96,7 @@ worker: grpcMetrics: enabled: false provideLatencyHistograms: false + latencyBuckets: [0.001, 0.005, 0.01, 0.05, 0.075, 0.1, 0.25, 0.5, 1.0, 2.0, 5.0, 10.0] capabilities: cas: true execution: true @@ -100,7 +104,6 @@ worker: inlineContentLimit: 1048567 # 1024 * 1024 operationPollPeriod: 1 dequeueMatchSettings: - acceptEverything: true allowUnmatched: false storages: - type: FILESYSTEM @@ -110,24 +113,29 @@ worker: skipLoad: false hexBucketLevels: 0 execRootCopyFallback: false - target: - publishTtlMetric: false + #- type: GRPC + # target: "grpc://host:port" executeStageWidth: 1 inputFetchStageWidth: 1 inputFetchDeadline: 60 linkInputDirectories: true - realInputDirectories: - - "external" + linkedInputDirectories: + - "(?!external)[^/]+" execOwner: defaultMaxCores: 0 limitGlobalExecution: false onlyMulticoreTests: false 
allowBringYourOwnContainer: false errorOperationRemainingResources: false + errorOperationOutputSizeExceeded: false + gracefulShutdownSeconds: 0 sandboxSettings: - alwaysUse: false + alwaysUseSandbox: false + alwaysUseCgroups: false + alwaysUseTmpFs: false selectForBlockNetwork: false selectForTmpFs: false + createSymlinkOutputs: false executionPolicies: - name: test executionWrapper: diff --git a/extensions.bzl b/extensions.bzl new file mode 100644 index 0000000000..e355bc6f76 --- /dev/null +++ b/extensions.bzl @@ -0,0 +1,71 @@ +""" +buildfarm dependencies, provided as a bzlmod module extension +""" + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file", "http_jar") + +def archive_dependencies(third_party): + return [ + # Bazel is referenced as a dependency so that buildfarm can access the linux-sandbox as a potential execution wrapper. + { + "name": "bazel", + "sha256": "06d3dbcba2286d45fc6479a87ccc649055821fc6da0c3c6801e73da780068397", + "strip_prefix": "bazel-6.0.0", + "urls": ["https://github.com/bazelbuild/bazel/archive/refs/tags/6.0.0.tar.gz"], + "patch_args": ["-p1"], + "patches": ["%s/bazel:bazel_visibility.patch" % third_party], + }, + + # Optional execution wrappers + { + "name": "skip_sleep", + "build_file": "%s:BUILD.skip_sleep" % third_party, + "sha256": "03980702e8e9b757df68aa26493ca4e8573770f15dd8a6684de728b9cb8549f1", + "strip_prefix": "TARDIS-f54fa4743e67763bb1ad77039b3d15be64e2e564", + "url": "https://github.com/Unilang/TARDIS/archive/f54fa4743e67763bb1ad77039b3d15be64e2e564.zip", + }, + ] + +def _buildfarm_extension_impl(_ctx): + """ + Define all 3rd party archive rules for buildfarm + """ + third_party = "//third_party" + for dependency in archive_dependencies(third_party): + params = {} + params.update(**dependency) + http_archive(**params) + + http_jar( + name = "opentelemetry", + sha256 = "eccd069da36031667e5698705a6838d173d527a5affce6cc514a14da9dbf57d7", + urls = [ + "https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/download/v1.28.0/opentelemetry-javaagent.jar", + ], + ) + + http_file( + name = "tini", + sha256 = "12d20136605531b09a2c2dac02ccee85e1b874eb322ef6baf7561cd93f93c855", + urls = ["https://github.com/krallin/tini/releases/download/v0.18.0/tini"], + ) + + if not native.existing_rule("com_github_grpc_grpc"): + http_archive( + name = "com_github_grpc_grpc", + strip_prefix = "grpc-1.46.0", + sha256 = "67423a4cd706ce16a88d1549297023f0f9f0d695a96dd684adc21e67b021f9bc", + urls = [ + "https://github.com/grpc/grpc/archive/v1.46.0.tar.gz", + ], + ) + http_archive( + name = "io_grpc_grpc_proto", + sha256 = "729ac127a003836d539ed9da72a21e094aac4c4609e0481d6fc9e28a844e11af", + strip_prefix = "grpc-proto-4f245d272a28a680606c0739753506880cf33b5f", + urls = ["https://github.com/grpc/grpc-proto/archive/4f245d272a28a680606c0739753506880cf33b5f.zip"], + ) + +build_deps = module_extension( + implementation = _buildfarm_extension_impl, +) diff --git a/generate_coverage.sh b/generate_coverage.sh index 7560334dbc..703ad93883 100755 --- a/generate_coverage.sh +++ b/generate_coverage.sh @@ -72,8 +72,8 @@ gate_lcov_results() { lcov_results=`$LCOV_TOOL --summary $traces 2>&1` # extract our percentage numbers - local line_percentage=$(echo "$lcov_results" | tr '\n' ' ' | awk '{print $8}' | sed 's/.$//') - local function_percentage=$(echo "$lcov_results" | tr '\n' ' ' | awk '{print $14}' | sed 's/.$//') + local line_percentage=$(echo "$lcov_results" | tr '\n' ' ' | awk '{print $5}' | sed 's/.$//') + local
function_percentage=$(echo "$lcov_results" | tr '\n' ' ' | awk '{print $11}' | sed 's/.$//') line_percentage=${line_percentage%.*} function_percentage=${function_percentage%.*} diff --git a/images.bzl b/images.bzl deleted file mode 100644 index 89d5fd22da..0000000000 --- a/images.bzl +++ /dev/null @@ -1,43 +0,0 @@ -""" -buildfarm images that can be imported into other WORKSPACE files -""" - -load("@io_bazel_rules_docker//repositories:deps.bzl", container_deps = "deps") -load("@io_bazel_rules_docker//container:container.bzl", "container_pull") - -def buildfarm_images(): - """ - Pull the necessary base containers to be used for image definitions. - """ - - container_deps() - - container_pull( - name = "java_image_base", - digest = "sha256:8c1769cb253bdecc257470f7fba05446a55b70805fa686f227a11655a90dfe9e", - registry = "gcr.io", - repository = "distroless/java", - ) - - container_pull( - name = "java_debug_image_base", - digest = "sha256:57c99181c9dea202a185970678f723496861b4ce3c534f35f29fe58964eb720c", - registry = "gcr.io", - repository = "distroless/java", - ) - - container_pull( - name = "ubuntu-bionic", - digest = "sha256:4bc527c7a288da405f2041928c63d0a6479a120ad63461c2f124c944def54be2", - registry = "index.docker.io", - repository = "bazelbuild/buildfarm-worker-base", - tag = "bionic-java11-gcc", - ) - - container_pull( - name = "amazon_corretto_java_image_base", - registry = "index.docker.io", - repository = "amazoncorretto", - tag = "19", - digest = "sha256:81d0df4412140416b27211c999e1f3c4565ae89a5cd92889475d20af422ba507", - ) diff --git a/jvm_flags.bzl b/jvm_flags.bzl index 363f161465..440e0718aa 100644 --- a/jvm_flags.bzl +++ b/jvm_flags.bzl @@ -54,6 +54,12 @@ def ensure_accurate_metadata(): "//config:windows": ["-Dsun.nio.fs.ensureAccurateMetadata=true"], }) +def add_opens_sun_nio_fs(): + return select({ + "//conditions:default": [], + "//config:windows": ["--add-opens java.base/sun.nio.fs=ALL-UNNAMED"], + }) + def server_telemetry(): return select({ "//config:open_telemetry": SERVER_TELEMETRY_JVM_FLAGS, @@ -67,7 +73,7 @@ def worker_telemetry(): }) def server_jvm_flags(): - return RECOMMENDED_JVM_FLAGS + DEFAULT_LOGGING_CONFIG + ensure_accurate_metadata() + server_telemetry() + return RECOMMENDED_JVM_FLAGS + DEFAULT_LOGGING_CONFIG + ensure_accurate_metadata() + add_opens_sun_nio_fs() + server_telemetry() def worker_jvm_flags(): - return RECOMMENDED_JVM_FLAGS + DEFAULT_LOGGING_CONFIG + ensure_accurate_metadata() + worker_telemetry() + return RECOMMENDED_JVM_FLAGS + DEFAULT_LOGGING_CONFIG + ensure_accurate_metadata() + add_opens_sun_nio_fs() + worker_telemetry() diff --git a/kubernetes/deployments/BUILD b/kubernetes/deployments/BUILD deleted file mode 100644 index cd2051dce8..0000000000 --- a/kubernetes/deployments/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_k8s//k8s:object.bzl", "k8s_object") - -k8s_object( - name = "kubernetes", - kind = "deployment", - template = ":kubernetes.yaml", -) - -k8s_object( - name = "server", - kind = "deployment", - template = ":server.yaml", -) - -k8s_object( - name = "shard-worker", - kind = "deployment", - template = ":shard-worker.yaml", -) - -k8s_object( - name = "redis-cluster", - kind = "deployment", - template = ":redis-cluster.yaml", -) diff --git a/kubernetes/deployments/kubernetes.yaml b/kubernetes/deployments/kubernetes.yaml deleted file mode 100644 index 5bc400448e..0000000000 --- a/kubernetes/deployments/kubernetes.yaml +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright 2017 The Kubernetes Authors. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Namespace -metadata: - name: kubernetes-dashboard - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kubernetes-dashboard - ---- - -kind: Service -apiVersion: v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kubernetes-dashboard -spec: - ports: - - port: 443 - targetPort: 8443 - selector: - k8s-app: kubernetes-dashboard - ---- - -apiVersion: v1 -kind: Secret -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-certs - namespace: kubernetes-dashboard -type: Opaque - ---- - -apiVersion: v1 -kind: Secret -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-csrf - namespace: kubernetes-dashboard -type: Opaque -data: - csrf: "" - ---- - -apiVersion: v1 -kind: Secret -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-key-holder - namespace: kubernetes-dashboard -type: Opaque - ---- - -kind: ConfigMap -apiVersion: v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-settings - namespace: kubernetes-dashboard - ---- - -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kubernetes-dashboard -rules: - # Allow Dashboard to get, update and delete Dashboard exclusive secrets. - - apiGroups: [""] - resources: ["secrets"] - resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"] - verbs: ["get", "update", "delete"] - # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. - - apiGroups: [""] - resources: ["configmaps"] - resourceNames: ["kubernetes-dashboard-settings"] - verbs: ["get", "update"] - # Allow Dashboard to get metrics. 
- - apiGroups: [""] - resources: ["services"] - resourceNames: ["heapster", "dashboard-metrics-scraper"] - verbs: ["proxy"] - - apiGroups: [""] - resources: ["services/proxy"] - resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"] - verbs: ["get"] - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard -rules: - # Allow Metrics Scraper to get metrics from the Metrics server - - apiGroups: ["metrics.k8s.io"] - resources: ["pods", "nodes"] - verbs: ["get", "list", "watch"] - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kubernetes-dashboard -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: kubernetes-dashboard -subjects: - - kind: ServiceAccount - name: kubernetes-dashboard - namespace: kubernetes-dashboard - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: kubernetes-dashboard -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kubernetes-dashboard -subjects: - - kind: ServiceAccount - name: kubernetes-dashboard - namespace: kubernetes-dashboard - ---- - -kind: Deployment -apiVersion: apps/v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kubernetes-dashboard -spec: - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - k8s-app: kubernetes-dashboard - template: - metadata: - labels: - k8s-app: kubernetes-dashboard - spec: - containers: - - name: kubernetes-dashboard - image: kubernetesui/dashboard:v2.4.0 - imagePullPolicy: Always - ports: - - containerPort: 8443 - protocol: TCP - args: - - --auto-generate-certificates - - --namespace=kubernetes-dashboard - # Uncomment the following line to manually specify Kubernetes API server Host - # If not specified, Dashboard will attempt to auto discover the API server and connect - # to it. Uncomment only if the default does not work. 
- # - --apiserver-host=http://my-address:port - volumeMounts: - - name: kubernetes-dashboard-certs - mountPath: /certs - # Create on-disk volume to store exec logs - - mountPath: /tmp - name: tmp-volume - livenessProbe: - httpGet: - scheme: HTTPS - path: / - port: 8443 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 1001 - runAsGroup: 2001 - volumes: - - name: kubernetes-dashboard-certs - secret: - secretName: kubernetes-dashboard-certs - - name: tmp-volume - emptyDir: {} - serviceAccountName: kubernetes-dashboard - nodeSelector: - "kubernetes.io/os": linux - # Comment the following tolerations if Dashboard must not be deployed on master - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - ---- - -kind: Service -apiVersion: v1 -metadata: - labels: - k8s-app: dashboard-metrics-scraper - name: dashboard-metrics-scraper - namespace: kubernetes-dashboard -spec: - ports: - - port: 8000 - targetPort: 8000 - selector: - k8s-app: dashboard-metrics-scraper - ---- - -kind: Deployment -apiVersion: apps/v1 -metadata: - labels: - k8s-app: dashboard-metrics-scraper - name: dashboard-metrics-scraper - namespace: kubernetes-dashboard -spec: - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - k8s-app: dashboard-metrics-scraper - template: - metadata: - labels: - k8s-app: dashboard-metrics-scraper - spec: - securityContext: - seccompProfile: - type: RuntimeDefault - containers: - - name: dashboard-metrics-scraper - image: kubernetesui/metrics-scraper:v1.0.7 - ports: - - containerPort: 8000 - protocol: TCP - livenessProbe: - httpGet: - scheme: HTTP - path: / - port: 8000 - initialDelaySeconds: 30 - timeoutSeconds: 30 - volumeMounts: - - mountPath: /tmp - name: tmp-volume - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 1001 - runAsGroup: 2001 - serviceAccountName: kubernetes-dashboard - nodeSelector: - "kubernetes.io/os": linux - # Comment the following tolerations if Dashboard must not be deployed on master - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - volumes: - - name: tmp-volume - emptyDir: {} diff --git a/kubernetes/deployments/redis-cluster.yaml b/kubernetes/deployments/redis-cluster.yaml deleted file mode 100644 index 9cd6263d45..0000000000 --- a/kubernetes/deployments/redis-cluster.yaml +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: redis-cluster - labels: - name: redis-cluster -spec: - replicas: 1 - selector: - matchLabels: - name: redis-cluster - template: - metadata: - labels: - name: redis-cluster - spec: - #subdomain: primary - containers: - - name: redis-cluster - image: redis:5.0.4 - ports: - - containerPort: 6379 diff --git a/kubernetes/deployments/server.yaml b/kubernetes/deployments/server.yaml deleted file mode 100644 index 163f65697f..0000000000 --- a/kubernetes/deployments/server.yaml +++ /dev/null @@ -1,28 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: server - labels: - name: server -spec: - replicas: 1 - selector: - matchLabels: - name: server - template: - metadata: - labels: - name: server - spec: - containers: - - name: server - image: buildfarm-server-image - ports: - - containerPort: 8980 - name: "server-comm" - - containerPort: 9092 - name: "server-metrics" - env: - - name: REDIS_URI - value: "redis://redis-cluster-service:6379" - diff --git a/kubernetes/deployments/shard-worker.yaml 
b/kubernetes/deployments/shard-worker.yaml deleted file mode 100644 index 7a914b6db0..0000000000 --- a/kubernetes/deployments/shard-worker.yaml +++ /dev/null @@ -1,32 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: shard-worker - labels: - name: shard-worker -spec: - replicas: 1 - selector: - matchLabels: - name: shard-worker - template: - metadata: - labels: - name: shard-worker - spec: - containers: - - name: shard-worker - image: buildfarm-shard-worker-image - resources: - limits: - cpu: "2" - requests: - cpu: "2" - ports: - - containerPort: 8981 - name: "worker-comm" - - containerPort: 9091 - name: "worker-metrics" - env: - - name: REDIS_URI - value: "redis://redis-cluster-service:6379" diff --git a/kubernetes/helm-charts/buildfarm/.gitignore b/kubernetes/helm-charts/buildfarm/.gitignore new file mode 100644 index 0000000000..8d8946152c --- /dev/null +++ b/kubernetes/helm-charts/buildfarm/.gitignore @@ -0,0 +1,2 @@ +charts +Chart.lock diff --git a/kubernetes/helm-charts/buildfarm/.helmignore b/kubernetes/helm-charts/buildfarm/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/kubernetes/helm-charts/buildfarm/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/kubernetes/helm-charts/buildfarm/Chart.yaml b/kubernetes/helm-charts/buildfarm/Chart.yaml new file mode 100644 index 0000000000..66c2560526 --- /dev/null +++ b/kubernetes/helm-charts/buildfarm/Chart.yaml @@ -0,0 +1,30 @@ +apiVersion: v2 +name: buildfarm +description: A Helm chart for bazel buildfarm + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.2.4 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: 2.8.0 + +dependencies: + - condition: redis.enabled + name: redis + repository: oci://registry-1.docker.io/bitnamicharts + version: ~18.14.2 diff --git a/kubernetes/helm-charts/buildfarm/templates/NOTES.txt b/kubernetes/helm-charts/buildfarm/templates/NOTES.txt new file mode 100644 index 0000000000..92421375fb --- /dev/null +++ b/kubernetes/helm-charts/buildfarm/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. 
Get the application URL by running these commands: +{{- if .Values.server.ingress.enabled }} +{{- range $host := .Values.server.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.server.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.server.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "buildfarm.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.server.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "buildfarm.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "buildfarm.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.server.service.port }} +{{- else if contains "ClusterIP" .Values.server.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "buildfarm.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/kubernetes/helm-charts/buildfarm/templates/_helpers.tpl b/kubernetes/helm-charts/buildfarm/templates/_helpers.tpl new file mode 100644 index 0000000000..fe8826bb59 --- /dev/null +++ b/kubernetes/helm-charts/buildfarm/templates/_helpers.tpl @@ -0,0 +1,85 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "buildfarm.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "buildfarm.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "buildfarm.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "buildfarm.labels" -}} +helm.sh/chart: {{ include "buildfarm.chart" . }} +{{ include "buildfarm.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "buildfarm.selectorLabels" -}} +app.kubernetes.io/name: {{ include "buildfarm.name" .
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "buildfarm.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "buildfarm.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Create the apiVersion of the horizontal pod autoscaler +*/}} +{{- define "buildfarm.autoscalingVersion" -}} +{{- if (.Capabilities.APIVersions.Has "autoscaling/v2") -}} +autoscaling/v2 +{{- else if (.Capabilities.APIVersions.Has "autoscaling/v2beta2") -}} +autoscaling/v2beta2 +{{- else -}} +autoscaling/v1 +{{- end -}} +{{- end -}} + +{{/* Checks for `externalRedis` */}} +{{- if .Values.externalRedis.host }} + {{/* check if they are using externalRedis (the default value for `externalRedis.host` is "localhost") */}} + {{- if not (eq .Values.externalRedis.host "localhost") }} + {{- if .Values.redis.enabled }} + {{ required "If `externalRedis.host` is set, then `redis.enabled` should be `false`!" nil }} + {{- end }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/helm-charts/buildfarm/templates/configmap.yaml b/kubernetes/helm-charts/buildfarm/templates/configmap.yaml new file mode 100644 index 0000000000..6b30f841a6 --- /dev/null +++ b/kubernetes/helm-charts/buildfarm/templates/configmap.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "buildfarm.fullname" . }}-config +data: + config.yml: |- + {{- range $key, $value := .Values.config }} + {{- if kindIs "map" $value }} + {{- else }} + {{ $key }}: {{ $value }}{{- end }} + {{- end }} + backplane: + {{- if .Values.redis.enabled }} + redisUri: '{{ printf "redis://%s-redis-master.%s:6379" .Release.Name .Release.Namespace }}' + {{- else }} + redisUri: "{{ .Values.externalRedis.uri }}" + {{- end }} + {{- with .Values.config.backplane }} + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.config.server }} + server: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.config.worker }} + worker: + {{- toYaml . | nindent 6 }} + {{- end }} diff --git a/kubernetes/helm-charts/buildfarm/templates/server/deployment.yaml b/kubernetes/helm-charts/buildfarm/templates/server/deployment.yaml new file mode 100644 index 0000000000..577f697b13 --- /dev/null +++ b/kubernetes/helm-charts/buildfarm/templates/server/deployment.yaml @@ -0,0 +1,75 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "buildfarm.fullname" . }}-server + labels: + name: {{ include "buildfarm.fullname" . }}-server + {{- include "buildfarm.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.server.replicaCount }} + selector: + matchLabels: + name: {{ include "buildfarm.fullname" . }}-server + {{- include "buildfarm.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + checksum/server-config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + name: {{ include "buildfarm.fullname" . }}-server + {{- include "buildfarm.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "buildfarm.serviceAccountName" .
}} + containers: + - name: buildfarm-server + image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.server.image.pullPolicy }} + env: + - name: CONFIG_PATH + value: /config/config.yml + {{- if .Values.server.extraEnv }} + {{- toYaml .Values.server.extraEnv | nindent 12 }} + {{- end }} + ports: + - containerPort: 8980 + name: "server-comm" + - containerPort: 9090 + name: "metrics" + livenessProbe: + httpGet: + path: / + port: metrics + readinessProbe: + httpGet: + path: / + port: metrics + resources: + {{- toYaml .Values.server.resources | nindent 12 }} + volumeMounts: + - mountPath: /config + name: config + readOnly: true + {{- with .Values.server.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.server.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.server.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - configMap: + defaultMode: 420 + name: {{ include "buildfarm.fullname" . }}-config + name: config diff --git a/kubernetes/helm-charts/buildfarm/templates/server/ingress.yaml b/kubernetes/helm-charts/buildfarm/templates/server/ingress.yaml new file mode 100644 index 0000000000..053f8be693 --- /dev/null +++ b/kubernetes/helm-charts/buildfarm/templates/server/ingress.yaml @@ -0,0 +1,63 @@ +{{- $server := .Values.server -}} +{{- $ingress := $server.ingress -}} +{{- if $ingress.enabled -}} +{{- $fullName := include "buildfarm.fullname" . -}} +{{- $svcPort := $server.service.port -}} +{{- if and $ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey $ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set $ingress.annotations "kubernetes.io/ingress.class" $ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "buildfarm.labels" . | nindent 4 }} + {{- with $ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and $ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ $ingress.className }} + {{- end }} + {{- if $ingress.tls }} + tls: + {{- range $ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range $ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/kubernetes/helm-charts/buildfarm/templates/server/service.yaml b/kubernetes/helm-charts/buildfarm/templates/server/service.yaml new file mode 100644 index 0000000000..6079f92dc0 --- /dev/null +++ b/kubernetes/helm-charts/buildfarm/templates/server/service.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "buildfarm.fullname" . }}-server + labels: + name: {{ include "buildfarm.fullname" . }}-server + {{- include "buildfarm.labels" . | nindent 4 }} + {{- with .Values.server.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.server.service.type }} + ports: + - port: {{ .Values.server.service.port }} + targetPort: server-comm + protocol: TCP + name: grpc + - port: 9090 + targetPort: metrics + protocol: TCP + name: metrics + selector: + name: {{ include "buildfarm.fullname" . }}-server + {{- include "buildfarm.selectorLabels" . | nindent 4 }} diff --git a/kubernetes/helm-charts/buildfarm/templates/server/servicemonitor.yaml b/kubernetes/helm-charts/buildfarm/templates/server/servicemonitor.yaml new file mode 100644 index 0000000000..fe8a12b649 --- /dev/null +++ b/kubernetes/helm-charts/buildfarm/templates/server/servicemonitor.yaml @@ -0,0 +1,37 @@ +{{- if .Values.server.serviceMonitor.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "buildfarm.fullname" . }}-server + labels: + {{- include "buildfarm.labels" . | nindent 4 }} +spec: + endpoints: + - port: "metrics" + {{- with .Values.server.serviceMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.server.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + honorLabels: true + path: {{ .Values.server.serviceMonitor.path }} + scheme: {{ .Values.server.serviceMonitor.scheme }} + {{- with .Values.server.serviceMonitor.relabelings }} + relabelings: + {{- toYaml . | nindent 6 }} + {{- end }} + jobLabel: "{{ .Release.Name }}" + selector: + matchLabels: + name: {{ include "buildfarm.fullname" . }}-server + {{- include "buildfarm.labels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + {{- with .Values.server.serviceMonitor.targetLabels }} + targetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/kubernetes/helm-charts/buildfarm/templates/serviceaccount.yaml b/kubernetes/helm-charts/buildfarm/templates/serviceaccount.yaml new file mode 100644 index 0000000000..f28779e3e4 --- /dev/null +++ b/kubernetes/helm-charts/buildfarm/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "buildfarm.serviceAccountName" . }} + labels: + {{- include "buildfarm.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml .
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/kubernetes/helm-charts/buildfarm/templates/shard-worker/autoscaler.yaml b/kubernetes/helm-charts/buildfarm/templates/shard-worker/autoscaler.yaml new file mode 100644 index 0000000000..fecd104fbc --- /dev/null +++ b/kubernetes/helm-charts/buildfarm/templates/shard-worker/autoscaler.yaml @@ -0,0 +1,30 @@ +{{- if .Values.shardWorker.autoscaling.enabled -}} +apiVersion: {{ include "buildfarm.autoscalingVersion" . }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "buildfarm.fullname" . }}-shard-worker + labels: + name: {{ include "buildfarm.fullname" . }}-shard-worker + {{- include "buildfarm.labels" . | nindent 4 }} + {{- with .Values.shardWorker.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + maxReplicas: {{ .Values.shardWorker.autoscaling.maxReplicas }} + minReplicas: {{ .Values.shardWorker.autoscaling.minReplicas }} + scaleTargetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: {{ include "buildfarm.fullname" . }}-shard-worker + {{- if contains "autoscaling/v2" (include "buildfarm.autoscalingVersion" . ) }} + {{- if .Values.shardWorker.autoscaling.behavior }} + behavior: + {{- toYaml .Values.shardWorker.autoscaling.behavior | nindent 4 }} + {{- end }} + metrics: + {{- toYaml .Values.shardWorker.autoscaling.metrics | nindent 4 }} + {{- else }} + targetCPUUtilizationPercentage: {{ .Values.shardWorker.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/kubernetes/helm-charts/buildfarm/templates/shard-worker/service.yaml b/kubernetes/helm-charts/buildfarm/templates/shard-worker/service.yaml new file mode 100644 index 0000000000..135756bd5f --- /dev/null +++ b/kubernetes/helm-charts/buildfarm/templates/shard-worker/service.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "buildfarm.fullname" . }}-shard-worker + labels: + name: {{ include "buildfarm.fullname" . }}-shard-worker + {{- include "buildfarm.labels" . | nindent 4 }} + {{- with .Values.shardWorker.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.shardWorker.service.type }} + ports: + - port: {{ .Values.shardWorker.service.port }} + targetPort: worker-comm + protocol: TCP + name: grpc + - port: 9090 + targetPort: metrics + protocol: TCP + name: metrics + selector: + name: {{ include "buildfarm.fullname" . }}-shard-worker + {{- include "buildfarm.selectorLabels" . | nindent 4 }} diff --git a/kubernetes/helm-charts/buildfarm/templates/shard-worker/servicemonitor.yaml b/kubernetes/helm-charts/buildfarm/templates/shard-worker/servicemonitor.yaml new file mode 100644 index 0000000000..8ff1a59a56 --- /dev/null +++ b/kubernetes/helm-charts/buildfarm/templates/shard-worker/servicemonitor.yaml @@ -0,0 +1,37 @@ +{{- if .Values.shardWorker.serviceMonitor.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "buildfarm.fullname" . }}-shard-worker + labels: + {{- include "buildfarm.labels" . | nindent 4 }} +spec: + endpoints: + - port: "metrics" + {{- with .Values.shardWorker.serviceMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.shardWorker.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + honorLabels: true + path: {{ .Values.shardWorker.serviceMonitor.path }} + scheme: {{ .Values.shardWorker.serviceMonitor.scheme }} + {{- with .Values.shardWorker.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .
| nindent 6 }} + {{- end }} + jobLabel: "{{ .Release.Name }}" + selector: + matchLabels: + name: {{ include "buildfarm.fullname" . }}-shard-worker + {{- include "buildfarm.labels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + {{- with .Values.shardWorker.serviceMonitor.targetLabels }} + targetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} diff --git a/kubernetes/helm-charts/buildfarm/templates/shard-worker/statefulsets.yaml b/kubernetes/helm-charts/buildfarm/templates/shard-worker/statefulsets.yaml new file mode 100644 index 0000000000..8f1c6dc5c9 --- /dev/null +++ b/kubernetes/helm-charts/buildfarm/templates/shard-worker/statefulsets.yaml @@ -0,0 +1,111 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "buildfarm.fullname" . }}-shard-worker + labels: + name: {{ include "buildfarm.fullname" . }}-shard-worker + {{- include "buildfarm.labels" . | nindent 4 }} +spec: + serviceName: {{ include "buildfarm.fullname" . }}-shard-worker + {{- if .Values.shardWorker.autoscaling.enabled }} + replicas: {{ .Values.shardWorker.autoscaling.minReplicas }} + {{- else }} + replicas: {{ .Values.shardWorker.replicaCount }} + {{- end }} + selector: + matchLabels: + name: {{ include "buildfarm.fullname" . }}-shard-worker + {{- include "buildfarm.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + checksum/worker-config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + name: {{ include "buildfarm.fullname" . }}-shard-worker + {{- include "buildfarm.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "buildfarm.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: buildfarm-worker + image: "{{ .Values.shardWorker.image.repository }}:{{ .Values.shardWorker.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.shardWorker.image.pullPolicy }} + args: + - --public_name=$(POD_IP):8982 + env: + - name: CONFIG_PATH + value: /config/config.yml + - name: POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + {{- if .Values.shardWorker.extraEnv }} + {{- toYaml .Values.shardWorker.extraEnv | nindent 12 }} + {{- end }} + ports: + - containerPort: 8982 + name: "worker-comm" + - containerPort: 9090 + name: "metrics" + livenessProbe: + httpGet: + path: / + port: metrics + readinessProbe: + httpGet: + path: / + port: metrics + resources: + {{- toYaml .Values.shardWorker.resources | nindent 12 }} + volumeMounts: + - mountPath: /config + name: config + readOnly: true + - mountPath: /tmp/worker + name: {{ include "buildfarm.fullname" . }}-shard-worker-data + {{- with .Values.shardWorker.extraVolumeMounts }} + {{- tpl (toYaml .) $ | nindent 12 -}} + {{- end }} + + {{- with .Values.shardWorker.nodeSelector }} + nodeSelector: + {{- tpl (toYaml .) $ | nindent 8 -}} + {{- end }} + + {{- with .Values.shardWorker.affinity }} + affinity: + {{- tpl (toYaml .) $ | nindent 8 -}} + {{- end }} + + {{- with .Values.shardWorker.tolerations }} + tolerations: + {{- tpl (toYaml .) $ | nindent 8 -}} + {{- end }} + volumes: + - configMap: + defaultMode: 420 + name: {{ include "buildfarm.fullname" . }}-config + name: config + {{- with .Values.shardWorker.extraVolumes }} + {{- tpl (toYaml .)
$ | nindent 8 }} + {{- end }} + volumeClaimTemplates: + - metadata: + name: {{ include "buildfarm.fullname" . }}-shard-worker-data + spec: + accessModes: ["ReadWriteOnce"] + {{- with .Values.shardWorker.storage.class }} + storageClassName: "{{ . }}" + {{- end }} + resources: + requests: + storage: "{{ .Values.shardWorker.storage.size }}" diff --git a/kubernetes/helm-charts/buildfarm/templates/tests/test-connection.yaml b/kubernetes/helm-charts/buildfarm/templates/tests/test-connection.yaml new file mode 100644 index 0000000000..7aea6f1cfe --- /dev/null +++ b/kubernetes/helm-charts/buildfarm/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "buildfarm.fullname" . }}-test-connection" + labels: + {{- include "buildfarm.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: curl + image: appropriate/curl:latest + command: ['curl'] + args: ['--output', '/dev/null', '{{ include "buildfarm.fullname" . }}-server:{{ .Values.server.service.port }}'] + restartPolicy: Never diff --git a/kubernetes/helm-charts/buildfarm/values.yaml b/kubernetes/helm-charts/buildfarm/values.yaml new file mode 100644 index 0000000000..d7fdb2ae32 --- /dev/null +++ b/kubernetes/helm-charts/buildfarm/values.yaml @@ -0,0 +1,198 @@ +# Default values for buildfarm. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +nameOverride: "" +fullnameOverride: "" + +imagePullSecrets: [] + +config: + # see: https://github.com/bazelbuild/bazel-buildfarm/blob/main/examples/config.yml + digestFunction: SHA256 + defaultActionTimeout: 600 + maximumActionTimeout: 3600 + maxEntrySizeBytes: "2147483648" # 2 * 1024 * 1024 * 1024 + prometheusPort: 9090 + backplane: + queues: + - name: "cpu" + allowUnmatched: true + properties: + - name: "min-cores" + value: "*" + - name: "max-cores" + value: "*" + server: + name: "shard" + recordBesEvents: true + worker: + port: 8982 + +server: + image: + repository: bazelbuild/buildfarm-server + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + replicaCount: 1 + resources: { } + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + service: + type: ClusterIP + port: 8980 + + ingress: + enabled: false + className: "" + annotations: { } + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [ ] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + nodeSelector: {} + tolerations: [] + affinity: {} + extraVolumes: [] + # - name: additionalSecret + # secret: + # secretName: my-secret + # defaultMode: 0600 + + extraVolumeMounts: [] + # - name: customConfig + # mountPath: /mnt/config + # readOnly: true + extraEnv: + - name: JAVABIN + value: "/usr/bin/java" + + serviceMonitor: + ## If true, a ServiceMonitor CRD is created for a Prometheus Operator + ## https://github.com/coreos/prometheus-operator + ## + enabled: false + path: /metrics + # namespace: monitoring (defaults to the namespace this chart is deployed to) + labels: {} + interval: 1m + scheme: http + tlsConfig: {} + scrapeTimeout: 30s + relabelings: [] + targetLabels: [] + +shardWorker: + image: + repository: bazelbuild/buildfarm-worker + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + replicaCount: 2 + autoscaling: + enabled: true + minReplicas: 2 + maxReplicas: 4 + behavior: {} # effective only in `v2*` + metrics: # effective only in `v2*` + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 + targetCPUUtilizationPercentage: 50 # effective only in `v1` + + resources: { } + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + storage: + # the storage class for the PV; leave empty to use the cluster default + class: "" + size: 50Gi + + service: + type: ClusterIP + port: 8982 + + #nodeSelector: {} + #tolerations: [] + #affinity: {} + + #extraVolumes: + # - name: additionalSecret + # secret: + # secretName: my-secret + # defaultMode: 0600 + + #extraVolumeMounts: + # - name: customConfig + # mountPath: /mnt/config + # readOnly: true + + extraEnv: + - name: JAVABIN + value: "/usr/bin/java" + + serviceMonitor: + ## If true, a ServiceMonitor CRD is created for a Prometheus Operator + ## https://github.com/coreos/prometheus-operator + ## + enabled: false + path: /metrics + # namespace: monitoring (defaults to the namespace this chart is deployed to) + labels: {} + interval: 1m + scheme: http + tlsConfig: {} + scrapeTimeout: 30s + relabelings: [] + targetLabels: [] + +################################### +## DATABASE | Embedded Redis +################################### +redis: + ## - set to `false` if using `externalRedis.*` + ## + enabled: true + auth: + enabled: false + replica: + replicaCount: 1 + +externalRedis: + uri: "redis://localhost:6379" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template + name: "" diff --git a/kubernetes/services/BUILD b/kubernetes/services/BUILD deleted file mode 100644 index 071e6344f7..0000000000 --- a/kubernetes/services/BUILD +++ /dev/null @@ -1,31 +0,0 @@ -load("@io_bazel_rules_k8s//k8s:object.bzl", "k8s_object") - -k8s_object( - name = "redis-cluster", - kind = "service", - template = ":redis-cluster.yaml", -) - -k8s_object( - name = "shard-worker", - kind = "service", - template = ":shard-worker.yaml", -) - -k8s_object( - name = "open-telemetry", - kind = "service", - template = ":open-telemetry.yaml", -) - -k8s_object( - name = "jaeger", - kind = "service", - template = ":jaeger.yaml", -) - -k8s_object( - name = "grafana", - kind = "service", - template = ":grafana.yaml", -) diff --git a/kubernetes/services/grafana.yaml b/kubernetes/services/grafana.yaml deleted file mode 100644 index b87cbef235..0000000000 --- a/kubernetes/services/grafana.yaml +++ /dev/null @@ -1,82 +0,0 @@ ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: grafana-pvc -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: grafana - name: grafana -spec: - selector: - matchLabels: - app: grafana - template: - metadata: - labels: - app: grafana - spec: - securityContext: - fsGroup: 472 - supplementalGroups: - - 0 - containers: - - name: grafana - image: grafana/grafana:8.2.5 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 3000 - name: http-grafana - protocol: TCP - readinessProbe: - failureThreshold: 3 - httpGet: - path: /robots.txt - port: 3000 - scheme: HTTP - initialDelaySeconds: 10 - periodSeconds: 30 - successThreshold: 1 - timeoutSeconds: 2 - livenessProbe: - failureThreshold: 3 - initialDelaySeconds: 30 - periodSeconds: 10 - successThreshold: 1 - tcpSocket: - port: 3000 - timeoutSeconds: 1 - resources: - requests: - cpu: 250m - memory: 750Mi - volumeMounts: - - mountPath: /var/lib/grafana - name: grafana-pv - volumes: - - name: grafana-pv - persistentVolumeClaim: - claimName: grafana-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: grafana -spec: - ports: - - port: 3000 - protocol: TCP - targetPort: http-grafana - selector: - app: grafana - sessionAffinity: None - type: LoadBalancer \ No newline at end of file diff --git a/kubernetes/services/jaeger.yaml b/kubernetes/services/jaeger.yaml deleted file mode 100644 index d5900caf6b..0000000000 --- a/kubernetes/services/jaeger.yaml +++ /dev/null @@ -1,305 +0,0 @@ ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - creationTimestamp: null - labels: - app: jaeger - app.kubernetes.io/component: service-account - app.kubernetes.io/instance: simplest - app.kubernetes.io/managed-by: jaeger-operator - app.kubernetes.io/name: simplest - app.kubernetes.io/part-of: jaeger - name: simplest ---- -apiVersion: v1 -data: - sampling: '{"default_strategy":{"param":1,"type":"probabilistic"}}' -kind: ConfigMap -metadata: - creationTimestamp: null - labels: - app: jaeger - app.kubernetes.io/component: sampling-configuration - app.kubernetes.io/instance: simplest - app.kubernetes.io/managed-by: jaeger-operator - app.kubernetes.io/name: simplest-sampling-configuration - app.kubernetes.io/part-of: jaeger - name: simplest-sampling-configuration ---- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - creationTimestamp: null - labels: - app: jaeger - app.kubernetes.io/component: query-ingress - app.kubernetes.io/instance: 
simplest - app.kubernetes.io/managed-by: jaeger-operator - app.kubernetes.io/name: simplest-query - app.kubernetes.io/part-of: jaeger - name: simplest-query -spec: - defaultBackend: - service: - name: simplest-query - port: - number: 16686 -status: - loadBalancer: {} ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - prometheus.io/scrape: "false" - service.beta.openshift.io/serving-cert-secret-name: simplest-collector-headless-tls - creationTimestamp: null - labels: - app: jaeger - app.kubernetes.io/component: service-collector - app.kubernetes.io/instance: simplest - app.kubernetes.io/managed-by: jaeger-operator - app.kubernetes.io/name: simplest-collector - app.kubernetes.io/part-of: jaeger - name: simplest-collector-headless -spec: - clusterIP: None - ports: - - name: http-zipkin - port: 9411 - targetPort: 0 - - name: grpc-http - port: 14250 - targetPort: 0 - - name: c-tchan-trft - port: 14267 - targetPort: 0 - - name: http-c-binary-trft - port: 14268 - targetPort: 0 - selector: - app: jaeger - app.kubernetes.io/component: all-in-one - app.kubernetes.io/instance: simplest - app.kubernetes.io/managed-by: jaeger-operator - app.kubernetes.io/name: simplest - app.kubernetes.io/part-of: jaeger -status: - loadBalancer: {} ---- -apiVersion: v1 -kind: Service -metadata: - creationTimestamp: null - labels: - app: jaeger - app.kubernetes.io/component: service-collector - app.kubernetes.io/instance: simplest - app.kubernetes.io/managed-by: jaeger-operator - app.kubernetes.io/name: simplest-collector - app.kubernetes.io/part-of: jaeger - name: simplest-collector -spec: - ports: - - name: http-zipkin - port: 9411 - targetPort: 0 - - name: grpc-http - port: 14250 - targetPort: 0 - - name: c-tchan-trft - port: 14267 - targetPort: 0 - - name: http-c-binary-trft - port: 14268 - targetPort: 0 - selector: - app: jaeger - app.kubernetes.io/component: all-in-one - app.kubernetes.io/instance: simplest - app.kubernetes.io/managed-by: jaeger-operator - app.kubernetes.io/name: simplest - app.kubernetes.io/part-of: jaeger - type: ClusterIP -status: - loadBalancer: {} ---- -apiVersion: v1 -kind: Service -metadata: - creationTimestamp: null - labels: - app: jaeger - app.kubernetes.io/component: service-query - app.kubernetes.io/instance: simplest - app.kubernetes.io/managed-by: jaeger-operator - app.kubernetes.io/name: simplest-query - app.kubernetes.io/part-of: jaeger - name: simplest-query -spec: - ports: - - name: http-query - port: 16686 - targetPort: 16686 - - name: grpc-query - port: 16685 - targetPort: 16685 - selector: - app: jaeger - app.kubernetes.io/component: all-in-one - app.kubernetes.io/instance: simplest - app.kubernetes.io/managed-by: jaeger-operator - app.kubernetes.io/name: simplest - app.kubernetes.io/part-of: jaeger - type: ClusterIP -status: - loadBalancer: {} ---- -apiVersion: v1 -kind: Service -metadata: - creationTimestamp: null - labels: - app: jaeger - app.kubernetes.io/component: service-agent - app.kubernetes.io/instance: simplest - app.kubernetes.io/managed-by: jaeger-operator - app.kubernetes.io/name: simplest-agent - app.kubernetes.io/part-of: jaeger - name: simplest-agent -spec: - clusterIP: None - ports: - - name: zk-compact-trft - port: 5775 - protocol: UDP - targetPort: 0 - - name: config-rest - port: 5778 - targetPort: 0 - - name: jg-compact-trft - port: 6831 - protocol: UDP - targetPort: 0 - - name: jg-binary-trft - port: 6832 - protocol: UDP - targetPort: 0 - selector: - app: jaeger - app.kubernetes.io/component: all-in-one - app.kubernetes.io/instance: simplest 
- app.kubernetes.io/managed-by: jaeger-operator - app.kubernetes.io/name: simplest - app.kubernetes.io/part-of: jaeger -status: - loadBalancer: {} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/inject: disabled - prometheus.io/port: "14269" - prometheus.io/scrape: "true" - sidecar.istio.io/inject: "false" - creationTimestamp: null - labels: - app: jaeger - app.kubernetes.io/component: all-in-one - app.kubernetes.io/instance: simplest - app.kubernetes.io/managed-by: jaeger-operator - app.kubernetes.io/name: simplest - app.kubernetes.io/part-of: jaeger - name: simplest -spec: - selector: - matchLabels: - app: jaeger - app.kubernetes.io/component: all-in-one - app.kubernetes.io/instance: simplest - app.kubernetes.io/managed-by: jaeger-operator - app.kubernetes.io/name: simplest - app.kubernetes.io/part-of: jaeger - strategy: - type: Recreate - template: - metadata: - annotations: - linkerd.io/inject: disabled - prometheus.io/port: "14269" - prometheus.io/scrape: "true" - sidecar.istio.io/inject: "false" - creationTimestamp: null - labels: - app: jaeger - app.kubernetes.io/component: all-in-one - app.kubernetes.io/instance: simplest - app.kubernetes.io/managed-by: jaeger-operator - app.kubernetes.io/name: simplest - app.kubernetes.io/part-of: jaeger - spec: - containers: - - args: - - --sampling.strategies-file=/etc/jaeger/sampling/sampling.json - env: - - name: SPAN_STORAGE_TYPE - value: memory - - name: COLLECTOR_ZIPKIN_HOST_PORT - value: :9411 - - name: JAEGER_DISABLED - value: "false" - image: jaegertracing/all-in-one:1.30.0 - livenessProbe: - failureThreshold: 5 - httpGet: - path: / - port: 14269 - initialDelaySeconds: 5 - periodSeconds: 15 - name: jaeger - ports: - - containerPort: 5775 - name: zk-compact-trft - protocol: UDP - - containerPort: 5778 - name: config-rest - - containerPort: 6831 - name: jg-compact-trft - protocol: UDP - - containerPort: 6832 - name: jg-binary-trft - protocol: UDP - - containerPort: 9411 - name: zipkin - - containerPort: 14267 - name: c-tchan-trft - - containerPort: 14268 - name: c-binary-trft - - containerPort: 16686 - name: query - - containerPort: 14269 - name: admin-http - - containerPort: 14250 - name: grpc - readinessProbe: - httpGet: - path: / - port: 14269 - initialDelaySeconds: 1 - resources: {} - volumeMounts: - - mountPath: /etc/jaeger/sampling - name: simplest-sampling-configuration-volume - readOnly: true - enableServiceLinks: false - serviceAccountName: simplest - volumes: - - configMap: - items: - - key: sampling - path: sampling.json - name: simplest-sampling-configuration - name: simplest-sampling-configuration-volume -status: {} diff --git a/kubernetes/services/open-telemetry.yaml b/kubernetes/services/open-telemetry.yaml deleted file mode 100644 index 494b8504e5..0000000000 --- a/kubernetes/services/open-telemetry.yaml +++ /dev/null @@ -1,218 +0,0 @@ ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: otel-agent-conf - labels: - app: opentelemetry - component: otel-agent-conf -data: - otel-agent-config: | - receivers: - otlp: - protocols: - grpc: - http: - exporters: - otlp: - endpoint: "otel-collector.default:4317" - tls: - insecure: true - sending_queue: - num_consumers: 4 - queue_size: 100 - retry_on_failure: - enabled: true - processors: - batch: - memory_limiter: - # 80% of maximum memory up to 2G - limit_mib: 400 - # 25% of limit up to 2G - spike_limit_mib: 100 - check_interval: 5s - extensions: - zpages: {} - memory_ballast: - # Memory Ballast size should be max 1/3 to 1/2 of memory. 
- size_mib: 165 - service: - extensions: [zpages, memory_ballast] - pipelines: - traces: - receivers: [otlp] - processors: [memory_limiter, batch] - exporters: [otlp] ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: otel-agent - labels: - app: opentelemetry - component: otel-agent -spec: - selector: - matchLabels: - app: opentelemetry - component: otel-agent - template: - metadata: - labels: - app: opentelemetry - component: otel-agent - spec: - containers: - - command: - - "/otelcol" - - "--config=/conf/otel-agent-config.yaml" - image: otel/opentelemetry-collector:0.38.0 - name: otel-agent - resources: - limits: - cpu: 500m - memory: 500Mi - requests: - cpu: 100m - memory: 100Mi - ports: - - containerPort: 55679 # ZPages endpoint. - - containerPort: 4317 # Default OpenTelemetry receiver port. - - containerPort: 8888 # Metrics. - volumeMounts: - - name: otel-agent-config-vol - mountPath: /conf - volumes: - - configMap: - name: otel-agent-conf - items: - - key: otel-agent-config - path: otel-agent-config.yaml - name: otel-agent-config-vol ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: otel-collector-conf - labels: - app: opentelemetry - component: otel-collector-conf -data: - otel-collector-config: | - receivers: - otlp: - protocols: - grpc: - http: - processors: - batch: - memory_limiter: - # 80% of maximum memory up to 2G - limit_mib: 1500 - # 25% of limit up to 2G - spike_limit_mib: 512 - check_interval: 5s - extensions: - zpages: {} - memory_ballast: - # Memory Ballast size should be max 1/3 to 1/2 of memory. - size_mib: 683 - exporters: - otlp: - endpoint: "http://someotlp.target.com:4317" # Replace with a real endpoint. - tls: - insecure: true - jaeger: - endpoint: "simplest-collector:14250" - tls: - insecure: true - service: - extensions: [zpages, memory_ballast] - pipelines: - traces/1: - receivers: [otlp] - processors: [memory_limiter, batch] - exporters: [jaeger] ---- -apiVersion: v1 -kind: Service -metadata: - name: otel-collector - labels: - app: opentelemetry - component: otel-collector -spec: - ports: - - name: otlp-grpc # Default endpoint for OpenTelemetry gRPC receiver. - port: 4317 - protocol: TCP - targetPort: 4317 - - name: otlp-http # Default endpoint for OpenTelemetry HTTP receiver. - port: 4318 - protocol: TCP - targetPort: 4318 - - name: metrics # Default endpoint for querying metrics. - port: 8888 - selector: - component: otel-collector ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: otel-collector - labels: - app: opentelemetry - component: otel-collector -spec: - selector: - matchLabels: - app: opentelemetry - component: otel-collector - minReadySeconds: 5 - progressDeadlineSeconds: 120 - replicas: 1 #TODO - adjust this to your own requirements - template: - metadata: - labels: - app: opentelemetry - component: otel-collector - spec: - containers: - - command: - - "/otelcol" - - "--config=/conf/otel-collector-config.yaml" - image: otel/opentelemetry-collector:0.38.0 - name: otel-collector - resources: - limits: - cpu: 1 - memory: 2Gi - requests: - cpu: 200m - memory: 400Mi - ports: - - containerPort: 55679 # Default endpoint for ZPages. - - containerPort: 4317 # Default endpoint for OpenTelemetry receiver. - - containerPort: 14250 # Default endpoint for Jaeger gRPC receiver. - - containerPort: 14268 # Default endpoint for Jaeger HTTP receiver. - - containerPort: 9411 # Default endpoint for Zipkin receiver. - - containerPort: 8888 # Default endpoint for querying metrics. 
- volumeMounts: - - name: otel-collector-config-vol - mountPath: /conf -# - name: otel-collector-secrets -# mountPath: /secrets - volumes: - - configMap: - name: otel-collector-conf - items: - - key: otel-collector-config - path: otel-collector-config.yaml - name: otel-collector-config-vol -# - secret: -# name: otel-collector-secrets -# items: -# - key: cert.pem -# path: cert.pem -# - key: key.pem -# path: key.pem \ No newline at end of file diff --git a/kubernetes/services/redis-cluster.yaml b/kubernetes/services/redis-cluster.yaml deleted file mode 100644 index 116dbcea39..0000000000 --- a/kubernetes/services/redis-cluster.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: redis-cluster-service -spec: - selector: - name: redis-cluster - ports: - - protocol: TCP - port: 6379 - targetPort: 6379 diff --git a/kubernetes/services/shard-worker.yaml b/kubernetes/services/shard-worker.yaml deleted file mode 100644 index 0e89e0f313..0000000000 --- a/kubernetes/services/shard-worker.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: shard-worker-service -spec: - selector: - name: shard-worker - ports: - - name: "communication" - protocol: TCP - port: 8981 - targetPort: 8981 - - name: "metrics" - protocol: TCP - port: 9091 - targetPort: 9091 - diff --git a/maven_install.json b/maven_install.json new file mode 100755 index 0000000000..41a9f2a0e9 --- /dev/null +++ b/maven_install.json @@ -0,0 +1,4504 @@ +{ + "__AUTOGENERATED_FILE_DO_NOT_MODIFY_THIS_FILE_MANUALLY": "THERE_IS_NO_DATA_ONLY_ZUUL", + "__INPUT_ARTIFACTS_HASH": 1128517071, + "__RESOLVED_ARTIFACTS_HASH": 1494115846, + "conflict_resolution": { + "com.fasterxml.jackson.core:jackson-databind:2.15.0": "com.fasterxml.jackson.core:jackson-databind:2.15.2", + "com.github.ben-manes.caffeine:caffeine:2.9.0": "com.github.ben-manes.caffeine:caffeine:3.0.5", + "com.google.code.gson:gson:2.8.9": "com.google.code.gson:gson:2.10.1", + "com.google.errorprone:error_prone_annotations:2.22.0": "com.google.errorprone:error_prone_annotations:2.23.0", + "com.google.errorprone:error_prone_annotations:2.3.2": "com.google.errorprone:error_prone_annotations:2.23.0", + "com.google.guava:guava:31.1-jre": "com.google.guava:guava:32.1.3-jre", + "com.google.guava:guava:32.1.1-jre": "com.google.guava:guava:32.1.3-jre", + "com.google.j2objc:j2objc-annotations:1.3": "com.google.j2objc:j2objc-annotations:2.8", + "com.google.protobuf:protobuf-java-util:3.19.1": "com.google.protobuf:protobuf-java-util:3.25.1", + "com.google.protobuf:protobuf-java:3.19.1": "com.google.protobuf:protobuf-java:3.25.1", + "com.google.truth:truth:1.1.2": "com.google.truth:truth:1.1.5", + "io.netty:netty-buffer:4.1.97.Final": "io.netty:netty-buffer:4.1.100.Final", + "io.netty:netty-codec-http2:4.1.97.Final": "io.netty:netty-codec-http2:4.1.100.Final", + "io.netty:netty-codec-http:4.1.97.Final": "io.netty:netty-codec-http:4.1.100.Final", + "io.netty:netty-codec-socks:4.1.97.Final": "io.netty:netty-codec-socks:4.1.100.Final", + "io.netty:netty-codec:4.1.97.Final": "io.netty:netty-codec:4.1.100.Final", + "io.netty:netty-common:4.1.97.Final": "io.netty:netty-common:4.1.100.Final", + "io.netty:netty-handler-proxy:4.1.97.Final": "io.netty:netty-handler-proxy:4.1.100.Final", + "io.netty:netty-handler:4.1.97.Final": "io.netty:netty-handler:4.1.100.Final", + "io.netty:netty-resolver:4.1.97.Final": "io.netty:netty-resolver:4.1.100.Final", + "io.netty:netty-transport-native-unix-common:4.1.97.Final": 
"io.netty:netty-transport-native-unix-common:4.1.100.Final", + "io.netty:netty-transport:4.1.97.Final": "io.netty:netty-transport:4.1.100.Final", + "org.apache.commons:commons-pool2:2.11.1": "org.apache.commons:commons-pool2:2.12.0", + "org.mockito:mockito-core:4.3.1": "org.mockito:mockito-core:5.10.0" + }, + "artifacts": { + "aopalliance:aopalliance": { + "shasums": { + "jar": "0addec670fedcd3f113c5c8091d783280d23f75e3acb841b61a9cdb079376a08" + }, + "version": "1.0" + }, + "com.amazonaws:aws-java-sdk-core": { + "shasums": { + "jar": "79682855ea21bd65094ad97109f9b3e4361d3e02926f5ee14ade3411c7ca43da" + }, + "version": "1.12.544" + }, + "com.amazonaws:aws-java-sdk-kms": { + "shasums": { + "jar": "a79a3768887ea675f2e7b617b361d5250b2128413dbd5d8fa43755a9ecc1b032" + }, + "version": "1.12.544" + }, + "com.amazonaws:aws-java-sdk-s3": { + "shasums": { + "jar": "817b2fac490d3e02ecaf3253c2e2ab0bf6d2291a841574cec70464312d669230" + }, + "version": "1.12.544" + }, + "com.amazonaws:aws-java-sdk-secretsmanager": { + "shasums": { + "jar": "b6a0953948949282b46769896c9d1eb1660ed77632c52137fdb72b8372fe685e" + }, + "version": "1.12.544" + }, + "com.amazonaws:jmespath-java": { + "shasums": { + "jar": "b707d67e8fcc87ffdf426bbe61bbe60ae97e865d35d6cec429a934d47fa2976c" + }, + "version": "1.12.544" + }, + "com.esotericsoftware:kryo": { + "shasums": { + "jar": "4b902a21d99f7b4c32e6f7400e91f9284fd184db881bb9e18328e14d8127f7f9" + }, + "version": "5.5.0" + }, + "com.esotericsoftware:minlog": { + "shasums": { + "jar": "5d4d632cfbebfe0a7644501cc303570b691406181bee65e9916b921c767d7c72" + }, + "version": "1.3.1" + }, + "com.esotericsoftware:reflectasm": { + "shasums": { + "jar": "712b44da79a5b7f47a28cbfcb3d8ecfc872fae349c48aa4d3e38a5d69956afce" + }, + "version": "1.11.9" + }, + "com.fasterxml.jackson.core:jackson-annotations": { + "shasums": { + "jar": "04e21f94dcfee4b078fa5a5f53047b785aaba69d19de392f616e7a7fe5d3882f" + }, + "version": "2.15.2" + }, + "com.fasterxml.jackson.core:jackson-core": { + "shasums": { + "jar": "303c99e82b1faa91a0bae5d8fbeb56f7e2adf9b526a900dd723bf140d62bd4b4" + }, + "version": "2.15.2" + }, + "com.fasterxml.jackson.core:jackson-databind": { + "shasums": { + "jar": "0eb2fdad6e40ab8832a78c9b22f58196dd970594e8d3d5a26ead87847c4f3a96" + }, + "version": "2.15.2" + }, + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor": { + "shasums": { + "jar": "cfa008d15f052e69221e8c3193056ff95c3c594271321ccac8d72dc1a770619c" + }, + "version": "2.12.6" + }, + "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml": { + "shasums": { + "jar": "37795cc1e8cb94b18d860dc3abd2e593617ce402149ae45aa89ed8bfb881c851" + }, + "version": "2.15.2" + }, + "com.fasterxml.jackson.jaxrs:jackson-jaxrs-base": { + "shasums": { + "jar": "3b6b74311d094990e6d8de356363988050fb2bf5389138b198b01a0ceb9a9668" + }, + "version": "2.10.3" + }, + "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider": { + "shasums": { + "jar": "93026591dbb332030dbe865b9c811a016e470d8ff6daaa7031556d2185e62054" + }, + "version": "2.10.3" + }, + "com.fasterxml.jackson.module:jackson-module-jaxb-annotations": { + "shasums": { + "jar": "8099caad4ae189525ef94d337d72d3e888abefabbbacbc9f3d2f096d534f2fb5" + }, + "version": "2.10.3" + }, + "com.github.ben-manes.caffeine:caffeine": { + "shasums": { + "jar": "8a9b54d3506a3b92ee46b217bcee79196b21ca6d52dc2967c686a205fb2f9c15" + }, + "version": "3.0.5" + }, + "com.github.docker-java:docker-java": { + "shasums": { + "jar": "3afb208216a17d8ce26a8f689303292701c87b974d43780cfba47bb2199a4467" + }, + "version": "3.3.3" + }, 
+ "com.github.docker-java:docker-java-api": { + "shasums": { + "jar": "8be2f41ddc33306b83f91e413fc1a07cee02db05e4c493456de3399e5bcb7b6c" + }, + "version": "3.3.3" + }, + "com.github.docker-java:docker-java-core": { + "shasums": { + "jar": "d1f60040b4666a6073d4a2e0b72fc86cfb1b77f36b093e46a4115ea255995267" + }, + "version": "3.3.3" + }, + "com.github.docker-java:docker-java-transport": { + "shasums": { + "jar": "103b94685f398b036cf9243cb8899680bcdb4e54c32340a32b2b5737a87a33ba" + }, + "version": "3.3.3" + }, + "com.github.docker-java:docker-java-transport-jersey": { + "shasums": { + "jar": "7574d831272a56268f4468b901059cafdca6e10176c87fec83f65d26d28c6fb0" + }, + "version": "3.3.3" + }, + "com.github.docker-java:docker-java-transport-netty": { + "shasums": { + "jar": "30152706a19f46f97bea55e85182762d8b5d2d23bea5e465af403537677f879b" + }, + "version": "3.3.3" + }, + "com.github.fppt:jedis-mock": { + "shasums": { + "jar": "2588555326a117a8120b43e388483caef8fd52c0fc96f2badec5aa59c26dd521" + }, + "version": "1.0.13" + }, + "com.github.jnr:jffi": { + "shasums": { + "jar": "74d3bce7397b4872ccb6a6fd84b8f260503f76509adc9548029f665852ad38d7", + "native": "f4c26c0a4a3eddfdbaaf4dda77d4d9f7d148ba4208798f32ce475ce4d6778744" + }, + "version": "1.3.11" + }, + "com.github.jnr:jnr-a64asm": { + "shasums": { + "jar": "53ae5ea7fa5c284e8279aa348e7b9de4548b0cae10bfd058fa217c791875e4cf" + }, + "version": "1.0.0" + }, + "com.github.jnr:jnr-constants": { + "shasums": { + "jar": "9a5b8cf9798d9d0331b8d8966c5235a22c4307676e35803a24659e6d76096f78" + }, + "version": "0.10.4" + }, + "com.github.jnr:jnr-ffi": { + "shasums": { + "jar": "01fafe177b1e3136b3789aeb0ff0884ae1e24b5ada711192f67084103697f2d4" + }, + "version": "2.2.14" + }, + "com.github.jnr:jnr-posix": { + "shasums": { + "jar": "9e24abedd700a1d8f0a2787566f2d0c4f3e4fbdb8be543d4b434ce445923c757" + }, + "version": "3.1.17" + }, + "com.github.jnr:jnr-x86asm": { + "shasums": { + "jar": "39f3675b910e6e9b93825f8284bec9f4ad3044cd20a6f7c8ff9e2f8695ebf21e" + }, + "version": "1.0.2" + }, + "com.github.kevinstern:software-and-algorithms": { + "shasums": { + "jar": "61ab82439cef37343b14f53154c461619375373a56b9338e895709fb54e0864c" + }, + "version": "1.0" + }, + "com.github.luben:zstd-jni": { + "shasums": { + "jar": "edd7fc60c2aaa6b77d3436f667bf30b06202633761ec20d683638b40e8f11426" + }, + "version": "1.5.5-7" + }, + "com.github.oshi:oshi-core": { + "shasums": { + "jar": "7e634fb57b8763b7803d5f9caaed46d19c3bdbe81ddd8a93e61528c700cdc09e" + }, + "version": "6.4.5" + }, + "com.github.pcj:google-options": { + "shasums": { + "jar": "f1f84449b46390a7fa73aac0b5acdec4312d6174146af0db1c92425c7005fdce" + }, + "version": "1.0.0" + }, + "com.github.serceman:jnr-fuse": { + "shasums": { + "jar": "ebe81ccbcbe1464996e5213ee24947cfba9eda7e9ffe154333f9bd8321217989" + }, + "version": "0.5.7" + }, + "com.google.android:annotations": { + "shasums": { + "jar": "ba734e1e84c09d615af6a09d33034b4f0442f8772dec120efb376d86a565ae15" + }, + "version": "4.1.1.4" + }, + "com.google.api.grpc:proto-google-common-protos": { + "shasums": { + "jar": "ee9c751f06b112e92b37f75e4f73a17d03ef2c3302c6e8d986adbcc721b63cb0" + }, + "version": "2.29.0" + }, + "com.google.auth:google-auth-library-credentials": { + "shasums": { + "jar": "095984b0594888a47f311b3c9dcf6da9ed86feeea8f78140c55e14c27b0593e5" + }, + "version": "1.19.0" + }, + "com.google.auth:google-auth-library-oauth2-http": { + "shasums": { + "jar": "01bdf5c5cd85e10b794e401775d9909b56a38ffce313fbd39510a5d87ed56f58" + }, + "version": "1.19.0" + }, + 
"com.google.auto.service:auto-service-annotations": { + "shasums": { + "jar": "c7bec54b7b5588b5967e870341091c5691181d954cf2039f1bf0a6eeb837473b" + }, + "version": "1.0.1" + }, + "com.google.auto.value:auto-value-annotations": { + "shasums": { + "jar": "a4fe0a211925e938a8510d741763ee1171a11bf931f5891ef4d4ee84fca72be2" + }, + "version": "1.10.1" + }, + "com.google.auto:auto-common": { + "shasums": { + "jar": "f43f29fe2a6ebaf04b2598cdeec32a4e346d49a9404e990f5fc19c19f3a28d0e" + }, + "version": "1.2.1" + }, + "com.google.code.findbugs:jsr305": { + "shasums": { + "jar": "766ad2a0783f2687962c8ad74ceecc38a28b9f72a2d085ee438b7813e928d0c7" + }, + "version": "3.0.2" + }, + "com.google.code.gson:gson": { + "shasums": { + "jar": "4241c14a7727c34feea6507ec801318a3d4a90f070e4525681079fb94ee4c593" + }, + "version": "2.10.1" + }, + "com.google.errorprone:error_prone_annotation": { + "shasums": { + "jar": "554c42449c9920ea1f6baec1d1b8aaac404a88be653f7cb441ee059316f8a1d1" + }, + "version": "2.22.0" + }, + "com.google.errorprone:error_prone_annotations": { + "shasums": { + "jar": "ec6f39f068b6ff9ac323c68e28b9299f8c0a80ca512dccb1d4a70f40ac3ec054" + }, + "version": "2.23.0" + }, + "com.google.errorprone:error_prone_check_api": { + "shasums": { + "jar": "1717bbf65757b8e1a83f3b0aa78c5ac25a6493008bc730091d404cf798fc0639" + }, + "version": "2.22.0" + }, + "com.google.errorprone:error_prone_core": { + "shasums": { + "jar": "32a3df226a9a47f48dd895a9a89678d50ac404282c33400781c38757e8143f2c" + }, + "version": "2.22.0" + }, + "com.google.errorprone:error_prone_type_annotations": { + "shasums": { + "jar": "6618b1d28df562622b77187b5c6dfc9c4c97851af73bd64dc0300efe9a439b20" + }, + "version": "2.22.0" + }, + "com.google.guava:failureaccess": { + "shasums": { + "jar": "a171ee4c734dd2da837e4b16be9df4661afab72a41adaf31eb84dfdaf936ca26" + }, + "version": "1.0.1" + }, + "com.google.guava:guava": { + "shasums": { + "jar": "6d4e2b5a118aab62e6e5e29d185a0224eed82c85c40ac3d33cf04a270c3b3744" + }, + "version": "32.1.3-jre" + }, + "com.google.guava:guava-testlib": { + "shasums": { + "jar": "aadc71b10d5c3ac474dd16be84cfb18d257e584d1e0a59f8cab64ef4376226ce" + }, + "version": "31.1-jre" + }, + "com.google.guava:listenablefuture": { + "shasums": { + "jar": "b372a037d4230aa57fbeffdef30fd6123f9c0c2db85d0aced00c91b974f33f99" + }, + "version": "9999.0-empty-to-avoid-conflict-with-guava" + }, + "com.google.http-client:google-http-client": { + "shasums": { + "jar": "e395dd1765e3e6bceb0c610706bcf4128de84bd6e65cf1d4adbf998b4114161c" + }, + "version": "1.42.3" + }, + "com.google.http-client:google-http-client-gson": { + "shasums": { + "jar": "8196efaa89c5f73b00b2b48edad02fcd78524259407c37ab1860737988545cee" + }, + "version": "1.42.3" + }, + "com.google.inject:guice": { + "shasums": { + "jar": "4130e50bfac48099c860f0d903b91860c81a249c90f38245f8fed58fc817bc26" + }, + "version": "5.1.0" + }, + "com.google.j2objc:j2objc-annotations": { + "shasums": { + "jar": "f02a95fa1a5e95edb3ed859fd0fb7df709d121a35290eff8b74dce2ab7f4d6ed" + }, + "version": "2.8" + }, + "com.google.jimfs:jimfs": { + "shasums": { + "jar": "82494408bb513f5512652e7b7f63d6f31f01eff57ce35c878644ffc2d25aee4f" + }, + "version": "1.3.0" + }, + "com.google.protobuf:protobuf-java": { + "shasums": { + "jar": "48a8e58a1a8f82eff141a7a388d38dfe77d7a48d5e57c9066ee37f19147e20df" + }, + "version": "3.25.1" + }, + "com.google.protobuf:protobuf-java-util": { + "shasums": { + "jar": "faf398ad0fe8c5a7d867f76d322e2e71bb31898fe86ec3223f787a6ed6fb4622" + }, + "version": "3.25.1" + }, + 
"com.google.truth:truth": { + "shasums": { + "jar": "7f6d50d6f43a102942ef2c5a05f37a84f77788bb448cf33cceebf86d34e575c0" + }, + "version": "1.1.5" + }, + "com.googlecode.json-simple:json-simple": { + "shasums": { + "jar": "4e69696892b88b41c55d49ab2fdcc21eead92bf54acc588c0050596c3b75199c" + }, + "version": "1.1.1" + }, + "com.jayway.jsonpath:json-path": { + "shasums": { + "jar": "9601707e95cd79fb98570a01ea8cfb857b5cde948744d6e0edf733c11002c95b" + }, + "version": "2.8.0" + }, + "com.kohlschutter.junixsocket:junixsocket-common": { + "shasums": { + "jar": "93d120e2d49ddf5bfdee8258762fc874b26c657f027f8d6ccc1a055156bfcde1" + }, + "version": "2.6.1" + }, + "com.kohlschutter.junixsocket:junixsocket-native-common": { + "shasums": { + "jar": "61fbbd6cfd2b6df65c0e7b19b16ff4f755d6cb1d333b566f4286407f12f18670" + }, + "version": "2.6.1" + }, + "com.sun.activation:jakarta.activation": { + "shasums": { + "jar": "d84d4ba8b55cdb7fdcbb885e6939386367433f56f5ab8cfdc302a7c3587fa92b" + }, + "version": "1.2.1" + }, + "commons-codec:commons-codec": { + "shasums": { + "jar": "b3e9f6d63a790109bf0d056611fbed1cf69055826defeb9894a71369d246ed63" + }, + "version": "1.15" + }, + "commons-io:commons-io": { + "shasums": { + "jar": "f877d304660ac2a142f3865badfc971dec7ed73c747c7f8d5d2f5139ca736513" + }, + "version": "2.6" + }, + "commons-logging:commons-logging": { + "shasums": { + "jar": "daddea1ea0be0f56978ab3006b8ac92834afeefbd9b7e4e6316fca57df0fa636" + }, + "version": "1.2" + }, + "io.github.eisop:dataflow-errorprone": { + "shasums": { + "jar": "89b4f5d2bd5059f067c5982a0e5988b87dfc8a8234795d68c6f3178846de3319" + }, + "version": "3.34.0-eisop1" + }, + "io.github.java-diff-utils:java-diff-utils": { + "shasums": { + "jar": "9990a2039778f6b4cc94790141c2868864eacee0620c6c459451121a901cd5b5" + }, + "version": "4.12" + }, + "io.grpc:grpc-api": { + "shasums": { + "jar": "2e896944cf513e0e5cfd32bcd72c89601a27c6ca56916f84b20f3a13bacf1b1f" + }, + "version": "1.62.2" + }, + "io.grpc:grpc-auth": { + "shasums": { + "jar": "6a16c43d956c79190486d3d0b951836a6706b3282b5d275a9bc4d33eb79d5618" + }, + "version": "1.62.2" + }, + "io.grpc:grpc-context": { + "shasums": { + "jar": "9959747df6a753119e1c1a3dff01aa766d2455f5e4860acaa305359e1d533a05" + }, + "version": "1.62.2" + }, + "io.grpc:grpc-core": { + "shasums": { + "jar": "18439902c473a2c1511e517d13b8ae796378850a8eda43787c6ba778fa90fcc5" + }, + "version": "1.62.2" + }, + "io.grpc:grpc-inprocess": { + "shasums": { + "jar": "f3c28a9d7f13fa995e4dd89e4f6aa08fa3b383665314fdfccb9f87f346625df7" + }, + "version": "1.62.2" + }, + "io.grpc:grpc-netty": { + "shasums": { + "jar": "6060217fe26a9f8b2d899d02d95c9b52513be774233326ee43a6b8433edb03c8" + }, + "version": "1.62.2" + }, + "io.grpc:grpc-netty-shaded": { + "shasums": { + "jar": "b3f1823ef30ca02ac721020f4b6492248efdbd0548c78e893d5d245cbca2cc60" + }, + "version": "1.62.2" + }, + "io.grpc:grpc-protobuf": { + "shasums": { + "jar": "66a0b196318bdfd817d965d2d82b9c81dfced8eb08c0f7510fcb728d2994237a" + }, + "version": "1.62.2" + }, + "io.grpc:grpc-protobuf-lite": { + "shasums": { + "jar": "79997989a8c2b5bf4dd18182a2df2e2f668703d68ba7c317e7a07809d33f91f4" + }, + "version": "1.62.2" + }, + "io.grpc:grpc-services": { + "shasums": { + "jar": "72f6eba0670184b634e7dcde0b97cde378a7cd74cdf63300f453d15c23bbbb6a" + }, + "version": "1.62.2" + }, + "io.grpc:grpc-stub": { + "shasums": { + "jar": "fb4ca679a4214143406c65ac4167b2b5e2ee2cab1fc101566bb1c4695d105e36" + }, + "version": "1.62.2" + }, + "io.grpc:grpc-testing": { + "shasums": { + "jar": 
"a952fed1a1b43569ececd832ed820bd149a6f214905a9c7d4fc8853dfd553df4" + }, + "version": "1.62.2" + }, + "io.grpc:grpc-util": { + "shasums": { + "jar": "3c7103e6f3738571e3aeda420fe2a6ac68e354534d8b66f41897b6755b48b735" + }, + "version": "1.62.2" + }, + "io.netty:netty-buffer": { + "shasums": { + "jar": "462874b44ee782fbefec64078cda6eb8e7bf9f0e0af71a928ef4c1f2d564f7ee" + }, + "version": "4.1.100.Final" + }, + "io.netty:netty-codec": { + "shasums": { + "jar": "180a01ed67af399602e24ff1c32864e7f57f57c4a0fa5e9ab3fe9b0e5e9cf051" + }, + "version": "4.1.100.Final" + }, + "io.netty:netty-codec-dns": { + "shasums": { + "jar": "857d0213bd4e504ad897a7c0f967ef3f728f120feea3e824729dad525b44bbce" + }, + "version": "4.1.96.Final" + }, + "io.netty:netty-codec-http": { + "shasums": { + "jar": "326811d249cb0e5555e78e026e877834e792261c38f0666d80464426695d9590" + }, + "version": "4.1.100.Final" + }, + "io.netty:netty-codec-http2": { + "shasums": { + "jar": "23b4a74350f4cf8d41b93fb93d52b5050667d8d53fffc385672c86eab83b8749" + }, + "version": "4.1.100.Final" + }, + "io.netty:netty-codec-socks": { + "shasums": { + "jar": "608a453b90f8384ba4efcdc6db7f899a1f10b9ea1890954696e6cfac45ff1ba9" + }, + "version": "4.1.100.Final" + }, + "io.netty:netty-common": { + "shasums": { + "jar": "d2908301f1ac6f2910900742473c15d701765d3d4467acdb1eebb9df3aa82885" + }, + "version": "4.1.100.Final" + }, + "io.netty:netty-handler": { + "shasums": { + "jar": "0e10e584c2e7fdf7f4804e14760ed987003f1b62ab982f62eaf13a9892793d3a" + }, + "version": "4.1.100.Final" + }, + "io.netty:netty-handler-proxy": { + "shasums": { + "jar": "686dbc2e61407f216d6cb267dd7954896f851dd34b58be3e757c5a89f20a5e67" + }, + "version": "4.1.100.Final" + }, + "io.netty:netty-resolver": { + "shasums": { + "jar": "c42c481c776e9d367a45cc3a67a06f65897d280334eb30b2362b8c55b7523f4f" + }, + "version": "4.1.100.Final" + }, + "io.netty:netty-resolver-dns": { + "shasums": { + "jar": "09a4f0cc4fc7af083515cfb84d6e70af4223dfe129858274cf506cc626f5175e" + }, + "version": "4.1.96.Final" + }, + "io.netty:netty-transport": { + "shasums": { + "jar": "b1deeceedab3734cdb959c55f4be5ab4a667a8aed59121ff93763f49470f5470" + }, + "version": "4.1.100.Final" + }, + "io.netty:netty-transport-classes-epoll": { + "shasums": { + "jar": "ee65fa17fe65f18fd22269f92bddad85bfb3a263cf65eba01e116a2f30b86ff5" + }, + "version": "4.1.97.Final" + }, + "io.netty:netty-transport-classes-kqueue": { + "shasums": { + "jar": "964ef63eb24a5c979f0af473da13f9574497e11bd41543a66d10609d34013b9f" + }, + "version": "4.1.97.Final" + }, + "io.netty:netty-transport-native-epoll": { + "shasums": { + "jar": "418a0d0d66d2d52a63a0e2cd5377f8c3186db47c09e3b8af39a43fec39c077fe", + "linux-x86_64": "1e83fc9f82e5415cdbb688c6a5c6bbbd7d198e9fdd6fdf64b3dc5d54dd1acfd0" + }, + "version": "4.1.97.Final" + }, + "io.netty:netty-transport-native-kqueue": { + "shasums": { + "jar": "85916dd7569148bb3d4bc831d45846807b39d2b6f593dc8794a42ca71a4086c9", + "osx-x86_64": "6870051aca7fa4dc5d0f2938036215a269504c50d2e36c4af38fd00d22ad7d95" + }, + "version": "4.1.97.Final" + }, + "io.netty:netty-transport-native-unix-common": { + "shasums": { + "jar": "5d888230a04c4a3e647c64e933cefb64fd49056f969bfb734c8a3fcedf0bea8a" + }, + "version": "4.1.100.Final" + }, + "io.opencensus:opencensus-api": { + "shasums": { + "jar": "f1474d47f4b6b001558ad27b952e35eda5cc7146788877fc52938c6eba24b382" + }, + "version": "0.31.1" + }, + "io.opencensus:opencensus-contrib-http-util": { + "shasums": { + "jar": "3ea995b55a4068be22989b70cc29a4d788c2d328d1d50613a7a9afd13fdd2d0a" + }, + 
"version": "0.31.1" + }, + "io.perfmark:perfmark-api": { + "shasums": { + "jar": "b7d23e93a34537ce332708269a0d1404788a5b5e1949e82f5535fce51b3ea95b" + }, + "version": "0.26.0" + }, + "io.projectreactor:reactor-core": { + "shasums": { + "jar": "86017581188627ae6de5d3822882f3594f87f9289ec4479391790ccfd5631508" + }, + "version": "3.5.3" + }, + "io.prometheus:simpleclient": { + "shasums": { + "jar": "a43d6c00e3964a7063c1360ddcddc598df4f8e659a8313b27f90e4c555badb1d" + }, + "version": "0.15.0" + }, + "io.prometheus:simpleclient_common": { + "shasums": { + "jar": "8d2fa21b5c7959010818245788bd43131633dea989d3facb28cec45b2da37918" + }, + "version": "0.15.0" + }, + "io.prometheus:simpleclient_hotspot": { + "shasums": { + "jar": "3c99768b090065bc0b25219061f94970aa569a2e363488d9120c79769d78c1a6" + }, + "version": "0.15.0" + }, + "io.prometheus:simpleclient_httpserver": { + "shasums": { + "jar": "a1a16e1f804e3382ed8b400220ecb2913c96412d937e618f54a7088e6eb432b6" + }, + "version": "0.15.0" + }, + "io.prometheus:simpleclient_tracer_common": { + "shasums": { + "jar": "1baef082e619c06262e23de1b46ad35eb4df36ceb19be06ac7ef32a9833e12a4" + }, + "version": "0.15.0" + }, + "io.prometheus:simpleclient_tracer_otel": { + "shasums": { + "jar": "0595251da49aa7997777b365ffdf97f5e2e88cd7f0dacf49add91b4fc8222b50" + }, + "version": "0.15.0" + }, + "io.prometheus:simpleclient_tracer_otel_agent": { + "shasums": { + "jar": "0cbb4c29d17e9fe71bb2cec013c2999ae8a9050f237ad33211761b40d2586e0b" + }, + "version": "0.15.0" + }, + "io.reactivex.rxjava3:rxjava": { + "shasums": { + "jar": "34682bd3ec6f043c5defb589a2d18113ba3e2d2372dd401744f8e4815c1db638" + }, + "version": "3.1.6" + }, + "jakarta.activation:jakarta.activation-api": { + "shasums": { + "jar": "8b0a0f52fa8b05c5431921a063ed866efaa41dadf2e3a7ee3e1961f2b0d9645b" + }, + "version": "1.2.1" + }, + "jakarta.annotation:jakarta.annotation-api": { + "shasums": { + "jar": "85fb03fc054cdf4efca8efd9b6712bbb418e1ab98241c4539c8585bbc23e1b8a" + }, + "version": "1.3.5" + }, + "jakarta.ws.rs:jakarta.ws.rs-api": { + "shasums": { + "jar": "4cea299c846c8a6e6470cbfc2f7c391bc29b9caa2f9264ac1064ba91691f4adf" + }, + "version": "2.1.6" + }, + "jakarta.xml.bind:jakarta.xml.bind-api": { + "shasums": { + "jar": "69156304079bdeed9fc0ae3b39389f19b3cc4ba4443bc80508995394ead742ea" + }, + "version": "2.3.2" + }, + "javax.annotation:javax.annotation-api": { + "shasums": { + "jar": "e04ba5195bcd555dc95650f7cc614d151e4bcd52d29a10b8aa2197f3ab89ab9b" + }, + "version": "1.3.2" + }, + "javax.cache:cache-api": { + "shasums": { + "jar": "9f34e007edfa82a7b2a2e1b969477dcf5099ce7f4f926fb54ce7e27c4a0cd54b" + }, + "version": "1.1.1" + }, + "javax.inject:javax.inject": { + "shasums": { + "jar": "91c77044a50c481636c32d916fd89c9118a72195390452c81065080f957de7ff" + }, + "version": "1" + }, + "joda-time:joda-time": { + "shasums": { + "jar": "b4670b95f75957c974284c5f3ada966040be2578f643c5c6083d262162061fa2" + }, + "version": "2.8.1" + }, + "junit:junit": { + "shasums": { + "jar": "8e495b634469d64fb8acfa3495a065cbacc8a0fff55ce1e31007be4c16dc57d3" + }, + "version": "4.13.2" + }, + "me.dinowernli:java-grpc-prometheus": { + "shasums": { + "jar": "badf9c84d9ea4b598bfa3fc690c85a8f6d863265829b9cb79f33884d48729ed8" + }, + "version": "0.6.0" + }, + "net.bytebuddy:byte-buddy": { + "shasums": { + "jar": "62ae28187ed2b062813da6a9d567bfee733c341582699b62dd980230729a0313" + }, + "version": "1.14.11" + }, + "net.bytebuddy:byte-buddy-agent": { + "shasums": { + "jar": "2f537a621a64fa7013d68c695a76a34ee8d79dad74e635caca16dd56257aeb80" + }, + 
"version": "1.14.11" + }, + "net.java.dev.jna:jna": { + "shasums": { + "jar": "66d4f819a062a51a1d5627bffc23fac55d1677f0e0a1feba144aabdd670a64bb" + }, + "version": "5.13.0" + }, + "net.java.dev.jna:jna-platform": { + "shasums": { + "jar": "474d7b88f6e97009b6ec1d98c3024dd95c23187c65dabfbc35331bcac3d173dd" + }, + "version": "5.13.0" + }, + "net.javacrumbs.future-converter:future-converter-common": { + "shasums": { + "jar": "567aeb2907088298fe5e67fd0fb1843571c24b46ef5b369f495c3d52c654b67b" + }, + "version": "1.2.0" + }, + "net.javacrumbs.future-converter:future-converter-guava-common": { + "shasums": { + "jar": "82bfab706005ea51c3e76958a62564367cf9cae207c0b1d55b9734876b9780c1" + }, + "version": "1.2.0" + }, + "net.javacrumbs.future-converter:future-converter-java8-common": { + "shasums": { + "jar": "bed25293fabbf59e048f67f88e55140ebc1cfa4fa899e397545d0193e866a65c" + }, + "version": "1.2.0" + }, + "net.javacrumbs.future-converter:future-converter-java8-guava": { + "shasums": { + "jar": "3b47ae8e2b2bfad810586c37537f002273c05237bd3adecafe9f9f57a2b18fde" + }, + "version": "1.2.0" + }, + "net.jcip:jcip-annotations": { + "shasums": { + "jar": "be5805392060c71474bf6c9a67a099471274d30b83eef84bfc4e0889a4f1dcc0" + }, + "version": "1.0" + }, + "net.minidev:accessors-smart": { + "shasums": { + "jar": "accdd5c7ac4c49b155890aaea1ffca2a9ccd5826b562dd95a99fc1887003e031" + }, + "version": "2.4.9" + }, + "net.minidev:json-smart": { + "shasums": { + "jar": "70cab5e9488630dc631b1fc6e7fa550d95cddd19ba14db39ceca7cabfbd4e5ae" + }, + "version": "2.4.10" + }, + "net.sf.jopt-simple:jopt-simple": { + "shasums": { + "jar": "df26cc58f235f477db07f753ba5a3ab243ebe5789d9f89ecf68dd62ea9a66c28" + }, + "version": "5.0.4" + }, + "org.apache.commons:commons-compress": { + "shasums": { + "jar": "c267f17160e9ef662b4d78b7f29dca7c82b15c5cff2cb6a9865ef4ab3dd5b787" + }, + "version": "1.23.0" + }, + "org.apache.commons:commons-lang3": { + "shasums": { + "jar": "82f528cf718c7a3c2f30fc5bc784e3c6a0a10b17605dadb9e16c82ede11e6064" + }, + "version": "3.13.0" + }, + "org.apache.commons:commons-math3": { + "shasums": { + "jar": "1e56d7b058d28b65abd256b8458e3885b674c1d588fa43cd7d1cbb9c7ef2b308" + }, + "version": "3.6.1" + }, + "org.apache.commons:commons-pool2": { + "shasums": { + "jar": "6d3bd18df8410f3e31b031aca582cc109342358a62a2759ebd0c4cdf30d06f8b" + }, + "version": "2.12.0" + }, + "org.apache.httpcomponents:httpclient": { + "shasums": { + "jar": "6fe9026a566c6a5001608cf3fc32196641f6c1e5e1986d1037ccdbd5f31ef743" + }, + "version": "4.5.13" + }, + "org.apache.httpcomponents:httpcore": { + "shasums": { + "jar": "3cbaed088c499a10f96dde58f39dc0e7985171abd88138ca1655a872011bb142" + }, + "version": "4.4.15" + }, + "org.apache.tomcat:annotations-api": { + "shasums": { + "jar": "253829d3c12b7381d1044fc22c6436cff025fe0d459e4a329413e560a7d0dd13" + }, + "version": "6.0.53" + }, + "org.bouncycastle:bcpkix-jdk18on": { + "shasums": { + "jar": "9e2c1db5a6ed29fbc36b438d39ca9feb901bb69bad0ce8d7bc735264bea79bd3" + }, + "version": "1.75" + }, + "org.bouncycastle:bcprov-jdk15on": { + "shasums": { + "jar": "8f3c20e3e2d565d26f33e8d4857a37d0d7f8ac39b62a7026496fcab1bdac30d4" + }, + "version": "1.70" + }, + "org.bouncycastle:bcprov-jdk18on": { + "shasums": { + "jar": "7f24018e9212dbda61c69212f8d7b1524c28efb978f10df590df3b4ccac47bd5" + }, + "version": "1.75" + }, + "org.bouncycastle:bcutil-jdk18on": { + "shasums": { + "jar": "027f36578c1ffdf08878c1cc2aa1e191f4b9da119c1e8f113299c53f298fa664" + }, + "version": "1.75" + }, + "org.checkerframework:checker-qual": { + 
"shasums": { + "jar": "9bd02cbe679a58afa0fba44c9621fe70130653e8c4564eb8d65e14bbfe26b7f8" + }, + "version": "3.38.0" + }, + "org.codehaus.mojo:animal-sniffer-annotations": { + "shasums": { + "jar": "9ffe526bf43a6348e9d8b33b9cd6f580a7f5eed0cf055913007eda263de974d0" + }, + "version": "1.23" + }, + "org.glassfish.hk2.external:aopalliance-repackaged": { + "shasums": { + "jar": "bad77f9278d753406360af9e4747bd9b3161554ea9cd3d62411a0ae1f2c141fd" + }, + "version": "2.6.1" + }, + "org.glassfish.hk2.external:jakarta.inject": { + "shasums": { + "jar": "5e88c123b3e41bca788b2683118867d9b6dec714247ea91c588aed46a36ee24f" + }, + "version": "2.6.1" + }, + "org.glassfish.hk2:hk2-api": { + "shasums": { + "jar": "c2cb80a01e58440ae57d5ee59af4d4d94e5180e04aff112b0cb611c07d61e773" + }, + "version": "2.6.1" + }, + "org.glassfish.hk2:hk2-locator": { + "shasums": { + "jar": "febc668deb9f2000c76bd4918d8086c0a4c74d07bd0c60486b72c6bd38b62874" + }, + "version": "2.6.1" + }, + "org.glassfish.hk2:hk2-utils": { + "shasums": { + "jar": "30727f79086452fdefdab08451d982c2082aa239d9f75cdeb1ba271e3c887036" + }, + "version": "2.6.1" + }, + "org.glassfish.hk2:osgi-resource-locator": { + "shasums": { + "jar": "aab5d7849f7cfcda2cc7c541ba1bd365151d42276f151c825387245dfde3dd74" + }, + "version": "1.0.3" + }, + "org.glassfish.jersey.connectors:jersey-apache-connector": { + "shasums": { + "jar": "28e87f2edc5284e293072941cea5e8ff462bb60f41c67b4ad7b906de2a7a8bd8" + }, + "version": "2.30.1" + }, + "org.glassfish.jersey.core:jersey-client": { + "shasums": { + "jar": "fe0aa736ce216e9efb6e17392142b87e704cf09e75a0cb6b3fd2d146937225c1" + }, + "version": "2.30.1" + }, + "org.glassfish.jersey.core:jersey-common": { + "shasums": { + "jar": "273c3ea4e3ff9b960eb8dbb7c74e0127436678e486ccd94a351729f22a249830" + }, + "version": "2.30.1" + }, + "org.glassfish.jersey.inject:jersey-hk2": { + "shasums": { + "jar": "cd5f4c10cf4915d1c217c295fc8b4eadceda7a28f9488b1d01de6b8792b33496" + }, + "version": "2.30.1" + }, + "org.hamcrest:hamcrest-core": { + "shasums": { + "jar": "66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9" + }, + "version": "1.3" + }, + "org.javassist:javassist": { + "shasums": { + "jar": "57d0a9e9286f82f4eaa851125186997f811befce0e2060ff0a15a77f5a9dd9a7" + }, + "version": "3.28.0-GA" + }, + "org.jboss.marshalling:jboss-marshalling": { + "shasums": { + "jar": "93d6257e1ac0f93ba6ff85827c9ef65b5efabf7bd2241fb3b4caf6c426f4f149" + }, + "version": "2.0.11.Final" + }, + "org.jboss.marshalling:jboss-marshalling-river": { + "shasums": { + "jar": "f3fa6545d15163468e1639fe3087de22234a9fd027a52be6e532bfe7bde6c554" + }, + "version": "2.0.11.Final" + }, + "org.jetbrains:annotations": { + "shasums": { + "jar": "245abad9a39eab1266ac9a8796980f462577e708ef3f6d43be2e008e4b72b9b4" + }, + "version": "16.0.2" + }, + "org.jodd:jodd-bean": { + "shasums": { + "jar": "d07d805fe0d59b5d2dbc85d0ebfcf30f52d7fd5a3ff89ff4fbea1e46b1319705" + }, + "version": "5.1.6" + }, + "org.jodd:jodd-core": { + "shasums": { + "jar": "4b504519263a98202480d3cf73562dff8245edc582350cc5f37d5965a0298122" + }, + "version": "5.1.6" + }, + "org.json:json": { + "shasums": { + "jar": "0f18192df289114e17aa1a0d0a7f8372cc9f5c7e4f7e39adcf8906fe714fa7d3" + }, + "version": "20231013" + }, + "org.luaj:luaj-jse": { + "shasums": { + "jar": "9b1f0a3e8f68427c6d74c2bf00ae0e6dbfce35994d3001fed4cef6ecda50be55" + }, + "version": "3.0.1" + }, + "org.mockito:mockito-core": { + "shasums": { + "jar": "0323f591b04d3a0d7ca9ebeebb9e9f34a07c0ec9169b7444ee3951b71d4cad56" + }, + "version": "5.10.0" + }, + 
"org.objenesis:objenesis": { + "shasums": { + "jar": "02dfd0b0439a5591e35b708ed2f5474eb0948f53abf74637e959b8e4ef69bfeb" + }, + "version": "3.3" + }, + "org.openjdk.jmh:jmh-core": { + "shasums": { + "jar": "dc0eaf2bbf0036a70b60798c785d6e03a9daf06b68b8edb0f1ba9eb3421baeb3" + }, + "version": "1.37" + }, + "org.openjdk.jmh:jmh-generator-annprocess": { + "shasums": { + "jar": "6a5604b5b804e0daca1145df1077609321687734a8b49387e49f10557c186c77" + }, + "version": "1.37" + }, + "org.ow2.asm:asm": { + "shasums": { + "jar": "b62e84b5980729751b0458c534cf1366f727542bb8d158621335682a460f0353" + }, + "version": "9.5" + }, + "org.ow2.asm:asm-analysis": { + "shasums": { + "jar": "878fbe521731c072d14d2d65b983b1beae6ad06fda0007b6a8bae81f73f433c4" + }, + "version": "9.2" + }, + "org.ow2.asm:asm-commons": { + "shasums": { + "jar": "be4ce53138a238bb522cd781cf91f3ba5ce2f6ca93ec62d46a162a127225e0a6" + }, + "version": "9.2" + }, + "org.ow2.asm:asm-tree": { + "shasums": { + "jar": "aabf9bd23091a4ebfc109c1f3ee7cf3e4b89f6ba2d3f51c5243f16b3cffae011" + }, + "version": "9.2" + }, + "org.ow2.asm:asm-util": { + "shasums": { + "jar": "ff5b3cd331ae8a9a804768280da98f50f424fef23dd3c788bb320e08c94ee598" + }, + "version": "9.2" + }, + "org.pcollections:pcollections": { + "shasums": { + "jar": "34f579ba075c8da2c8a0fedd0f04e21eac2fb6c660d90d0fabb573e8b4dc6918" + }, + "version": "3.1.4" + }, + "org.projectlombok:lombok": { + "shasums": { + "jar": "14151b47582d570b4de16a147ece3bdbd19ace4aee5bde3a5578c87db9ecb998" + }, + "version": "1.18.30" + }, + "org.reactivestreams:reactive-streams": { + "shasums": { + "jar": "f75ca597789b3dac58f61857b9ac2e1034a68fa672db35055a8fb4509e325f28" + }, + "version": "1.0.4" + }, + "org.redisson:redisson": { + "shasums": { + "jar": "fe59768d63419b0073c0cbd6029d0be864ad5c9d233dd1337945f9edfe3df3ca" + }, + "version": "3.23.4" + }, + "org.reflections:reflections": { + "shasums": { + "jar": "938a2d08fe54050d7610b944d8ddc3a09355710d9e6be0aac838dbc04e9a2825" + }, + "version": "0.10.2" + }, + "org.slf4j:jcl-over-slf4j": { + "shasums": { + "jar": "71e9ee37b9e4eb7802a2acc5f41728a4cf3915e7483d798db3b4ff2ec8847c50" + }, + "version": "1.7.30" + }, + "org.slf4j:slf4j-api": { + "shasums": { + "jar": "0818930dc8d7debb403204611691da58e49d42c50b6ffcfdce02dadb7c3c2b6c" + }, + "version": "2.0.9" + }, + "org.slf4j:slf4j-simple": { + "shasums": { + "jar": "71f9c6de6dbaec2d10caa303faf08c5e749be53b242896c64c96b7c6bb6d62dc" + }, + "version": "2.0.9" + }, + "org.threeten:threetenbp": { + "shasums": { + "jar": "e4b1eb3d90c38a54c7f3384fda957e0b5bf0b41b40672a44ae8b03cb6c87ce06" + }, + "version": "1.6.8" + }, + "org.xerial:sqlite-jdbc": { + "shasums": { + "jar": "605979c94e7fe00437f1e10dcfa657a23f125c8eb4d2f0ec17e3f84613894cc3" + }, + "version": "3.34.0" + }, + "org.yaml:snakeyaml": { + "shasums": { + "jar": "1467931448a0817696ae2805b7b8b20bfb082652bf9c4efaed528930dc49389b" + }, + "version": "2.2" + }, + "redis.clients:jedis": { + "shasums": { + "jar": "d171e10406c0362178ee15f5098d0be51390638f702bb55941571a2ff8c12a57" + }, + "version": "5.1.2" + }, + "software.amazon.ion:ion-java": { + "shasums": { + "jar": "0d127b205a1fce0abc2a3757a041748651bc66c15cf4c059bac5833b27d471a5" + }, + "version": "1.0.2" + } + }, + "dependencies": { + "com.amazonaws:aws-java-sdk-core": [ + "com.fasterxml.jackson.core:jackson-databind", + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor", + "commons-codec:commons-codec", + "commons-logging:commons-logging", + "joda-time:joda-time", + "org.apache.httpcomponents:httpclient", + 
"software.amazon.ion:ion-java" + ], + "com.amazonaws:aws-java-sdk-kms": [ + "com.amazonaws:aws-java-sdk-core", + "com.amazonaws:jmespath-java" + ], + "com.amazonaws:aws-java-sdk-s3": [ + "com.amazonaws:aws-java-sdk-core", + "com.amazonaws:aws-java-sdk-kms", + "com.amazonaws:jmespath-java" + ], + "com.amazonaws:aws-java-sdk-secretsmanager": [ + "com.amazonaws:aws-java-sdk-core", + "com.amazonaws:jmespath-java" + ], + "com.amazonaws:jmespath-java": [ + "com.fasterxml.jackson.core:jackson-databind" + ], + "com.esotericsoftware:kryo": [ + "com.esotericsoftware:minlog", + "com.esotericsoftware:reflectasm", + "org.objenesis:objenesis" + ], + "com.fasterxml.jackson.core:jackson-databind": [ + "com.fasterxml.jackson.core:jackson-annotations", + "com.fasterxml.jackson.core:jackson-core" + ], + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor": [ + "com.fasterxml.jackson.core:jackson-core", + "com.fasterxml.jackson.core:jackson-databind" + ], + "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml": [ + "com.fasterxml.jackson.core:jackson-core", + "com.fasterxml.jackson.core:jackson-databind", + "org.yaml:snakeyaml" + ], + "com.fasterxml.jackson.jaxrs:jackson-jaxrs-base": [ + "com.fasterxml.jackson.core:jackson-core", + "com.fasterxml.jackson.core:jackson-databind" + ], + "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider": [ + "com.fasterxml.jackson.jaxrs:jackson-jaxrs-base", + "com.fasterxml.jackson.module:jackson-module-jaxb-annotations" + ], + "com.fasterxml.jackson.module:jackson-module-jaxb-annotations": [ + "com.fasterxml.jackson.core:jackson-annotations", + "com.fasterxml.jackson.core:jackson-core", + "com.fasterxml.jackson.core:jackson-databind", + "jakarta.activation:jakarta.activation-api", + "jakarta.xml.bind:jakarta.xml.bind-api" + ], + "com.github.ben-manes.caffeine:caffeine": [ + "com.google.errorprone:error_prone_annotations", + "org.checkerframework:checker-qual" + ], + "com.github.docker-java:docker-java": [ + "com.github.docker-java:docker-java-core", + "com.github.docker-java:docker-java-transport-jersey", + "com.github.docker-java:docker-java-transport-netty", + "org.slf4j:jcl-over-slf4j" + ], + "com.github.docker-java:docker-java-api": [ + "com.fasterxml.jackson.core:jackson-annotations", + "org.slf4j:slf4j-api" + ], + "com.github.docker-java:docker-java-core": [ + "com.fasterxml.jackson.core:jackson-databind", + "com.github.docker-java:docker-java-api", + "com.github.docker-java:docker-java-transport", + "com.google.guava:guava", + "commons-io:commons-io", + "org.apache.commons:commons-compress", + "org.apache.commons:commons-lang3", + "org.bouncycastle:bcpkix-jdk18on", + "org.slf4j:slf4j-api" + ], + "com.github.docker-java:docker-java-transport-jersey": [ + "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider", + "com.github.docker-java:docker-java-core", + "com.kohlschutter.junixsocket:junixsocket-common", + "com.kohlschutter.junixsocket:junixsocket-native-common", + "org.apache.httpcomponents:httpclient", + "org.apache.httpcomponents:httpcore", + "org.glassfish.jersey.connectors:jersey-apache-connector", + "org.glassfish.jersey.core:jersey-client", + "org.glassfish.jersey.inject:jersey-hk2" + ], + "com.github.docker-java:docker-java-transport-netty": [ + "com.github.docker-java:docker-java-core", + "io.netty:netty-codec-http", + "io.netty:netty-handler", + "io.netty:netty-handler-proxy", + "io.netty:netty-transport-native-epoll:jar:linux-x86_64", + "io.netty:netty-transport-native-kqueue:jar:osx-x86_64" + ], + "com.github.fppt:jedis-mock": [ + 
"org.luaj:luaj-jse", + "org.reflections:reflections", + "org.slf4j:slf4j-api", + "redis.clients:jedis" + ], + "com.github.jnr:jnr-ffi": [ + "com.github.jnr:jffi", + "com.github.jnr:jffi:jar:native", + "com.github.jnr:jnr-a64asm", + "com.github.jnr:jnr-x86asm", + "org.ow2.asm:asm", + "org.ow2.asm:asm-analysis", + "org.ow2.asm:asm-commons", + "org.ow2.asm:asm-tree", + "org.ow2.asm:asm-util" + ], + "com.github.jnr:jnr-posix": [ + "com.github.jnr:jnr-constants", + "com.github.jnr:jnr-ffi" + ], + "com.github.oshi:oshi-core": [ + "net.java.dev.jna:jna", + "net.java.dev.jna:jna-platform", + "org.slf4j:slf4j-api" + ], + "com.github.pcj:google-options": [ + "com.google.code.findbugs:jsr305", + "com.google.guava:guava" + ], + "com.github.serceman:jnr-fuse": [ + "com.github.jnr:jnr-constants", + "com.github.jnr:jnr-ffi", + "com.github.jnr:jnr-posix" + ], + "com.google.api.grpc:proto-google-common-protos": [ + "com.google.protobuf:protobuf-java" + ], + "com.google.auth:google-auth-library-oauth2-http": [ + "com.google.auth:google-auth-library-credentials", + "com.google.auto.value:auto-value-annotations", + "com.google.code.findbugs:jsr305", + "com.google.guava:guava", + "com.google.http-client:google-http-client", + "com.google.http-client:google-http-client-gson" + ], + "com.google.auto:auto-common": [ + "com.google.guava:guava" + ], + "com.google.errorprone:error_prone_annotation": [ + "com.google.guava:guava" + ], + "com.google.errorprone:error_prone_check_api": [ + "com.github.ben-manes.caffeine:caffeine", + "com.github.kevinstern:software-and-algorithms", + "com.google.auto.value:auto-value-annotations", + "com.google.code.findbugs:jsr305", + "com.google.errorprone:error_prone_annotation", + "com.google.errorprone:error_prone_annotations", + "com.google.inject:guice", + "io.github.eisop:dataflow-errorprone", + "io.github.java-diff-utils:java-diff-utils" + ], + "com.google.errorprone:error_prone_core": [ + "com.google.auto.service:auto-service-annotations", + "com.google.auto.value:auto-value-annotations", + "com.google.auto:auto-common", + "com.google.code.findbugs:jsr305", + "com.google.errorprone:error_prone_annotation", + "com.google.errorprone:error_prone_annotations", + "com.google.errorprone:error_prone_check_api", + "com.google.errorprone:error_prone_type_annotations", + "com.google.guava:guava", + "com.google.protobuf:protobuf-java", + "io.github.eisop:dataflow-errorprone", + "javax.inject:javax.inject", + "org.pcollections:pcollections" + ], + "com.google.guava:guava": [ + "com.google.code.findbugs:jsr305", + "com.google.errorprone:error_prone_annotations", + "com.google.guava:failureaccess", + "com.google.guava:listenablefuture", + "com.google.j2objc:j2objc-annotations", + "org.checkerframework:checker-qual" + ], + "com.google.guava:guava-testlib": [ + "com.google.code.findbugs:jsr305", + "com.google.errorprone:error_prone_annotations", + "com.google.guava:guava", + "com.google.j2objc:j2objc-annotations", + "junit:junit", + "org.checkerframework:checker-qual" + ], + "com.google.http-client:google-http-client": [ + "com.google.code.findbugs:jsr305", + "com.google.errorprone:error_prone_annotations", + "com.google.guava:guava", + "com.google.j2objc:j2objc-annotations", + "io.opencensus:opencensus-api", + "io.opencensus:opencensus-contrib-http-util", + "org.apache.httpcomponents:httpclient", + "org.apache.httpcomponents:httpcore" + ], + "com.google.http-client:google-http-client-gson": [ + "com.google.code.gson:gson", + "com.google.http-client:google-http-client" + ], + 
"com.google.inject:guice": [ + "aopalliance:aopalliance", + "com.google.guava:guava", + "javax.inject:javax.inject" + ], + "com.google.jimfs:jimfs": [ + "com.google.guava:guava" + ], + "com.google.protobuf:protobuf-java-util": [ + "com.google.code.findbugs:jsr305", + "com.google.code.gson:gson", + "com.google.errorprone:error_prone_annotations", + "com.google.guava:guava", + "com.google.j2objc:j2objc-annotations", + "com.google.protobuf:protobuf-java" + ], + "com.google.truth:truth": [ + "com.google.auto.value:auto-value-annotations", + "com.google.errorprone:error_prone_annotations", + "com.google.guava:guava", + "junit:junit", + "org.checkerframework:checker-qual", + "org.ow2.asm:asm" + ], + "com.googlecode.json-simple:json-simple": [ + "junit:junit" + ], + "com.jayway.jsonpath:json-path": [ + "net.minidev:json-smart", + "org.slf4j:slf4j-api" + ], + "io.grpc:grpc-api": [ + "com.google.code.findbugs:jsr305", + "com.google.errorprone:error_prone_annotations", + "com.google.guava:guava" + ], + "io.grpc:grpc-auth": [ + "com.google.auth:google-auth-library-credentials", + "com.google.guava:guava", + "io.grpc:grpc-api" + ], + "io.grpc:grpc-context": [ + "io.grpc:grpc-api" + ], + "io.grpc:grpc-core": [ + "com.google.android:annotations", + "com.google.code.gson:gson", + "com.google.errorprone:error_prone_annotations", + "com.google.guava:guava", + "io.grpc:grpc-api", + "io.grpc:grpc-context", + "io.perfmark:perfmark-api", + "org.codehaus.mojo:animal-sniffer-annotations" + ], + "io.grpc:grpc-inprocess": [ + "com.google.guava:guava", + "io.grpc:grpc-api", + "io.grpc:grpc-core" + ], + "io.grpc:grpc-netty": [ + "com.google.errorprone:error_prone_annotations", + "com.google.guava:guava", + "io.grpc:grpc-api", + "io.grpc:grpc-core", + "io.grpc:grpc-util", + "io.netty:netty-codec-http2", + "io.netty:netty-handler-proxy", + "io.netty:netty-transport-native-unix-common", + "io.perfmark:perfmark-api" + ], + "io.grpc:grpc-netty-shaded": [ + "com.google.errorprone:error_prone_annotations", + "com.google.guava:guava", + "io.grpc:grpc-api", + "io.grpc:grpc-core", + "io.grpc:grpc-util", + "io.perfmark:perfmark-api" + ], + "io.grpc:grpc-protobuf": [ + "com.google.api.grpc:proto-google-common-protos", + "com.google.code.findbugs:jsr305", + "com.google.guava:guava", + "com.google.protobuf:protobuf-java", + "io.grpc:grpc-api", + "io.grpc:grpc-protobuf-lite" + ], + "io.grpc:grpc-protobuf-lite": [ + "com.google.code.findbugs:jsr305", + "com.google.guava:guava", + "io.grpc:grpc-api" + ], + "io.grpc:grpc-services": [ + "com.google.code.gson:gson", + "com.google.errorprone:error_prone_annotations", + "com.google.guava:guava", + "com.google.j2objc:j2objc-annotations", + "com.google.protobuf:protobuf-java-util", + "io.grpc:grpc-core", + "io.grpc:grpc-protobuf", + "io.grpc:grpc-stub", + "io.grpc:grpc-util" + ], + "io.grpc:grpc-stub": [ + "com.google.errorprone:error_prone_annotations", + "com.google.guava:guava", + "io.grpc:grpc-api" + ], + "io.grpc:grpc-testing": [ + "io.grpc:grpc-api", + "io.grpc:grpc-core", + "io.grpc:grpc-inprocess", + "io.grpc:grpc-stub", + "io.grpc:grpc-util", + "junit:junit" + ], + "io.grpc:grpc-util": [ + "com.google.guava:guava", + "io.grpc:grpc-api", + "io.grpc:grpc-core", + "org.codehaus.mojo:animal-sniffer-annotations" + ], + "io.netty:netty-buffer": [ + "io.netty:netty-common" + ], + "io.netty:netty-codec": [ + "io.netty:netty-buffer", + "io.netty:netty-common", + "io.netty:netty-transport" + ], + "io.netty:netty-codec-dns": [ + "io.netty:netty-buffer", + "io.netty:netty-codec", + 
"io.netty:netty-common", + "io.netty:netty-transport" + ], + "io.netty:netty-codec-http": [ + "io.netty:netty-buffer", + "io.netty:netty-codec", + "io.netty:netty-common", + "io.netty:netty-handler", + "io.netty:netty-transport" + ], + "io.netty:netty-codec-http2": [ + "io.netty:netty-buffer", + "io.netty:netty-codec", + "io.netty:netty-codec-http", + "io.netty:netty-common", + "io.netty:netty-handler", + "io.netty:netty-transport" + ], + "io.netty:netty-codec-socks": [ + "io.netty:netty-buffer", + "io.netty:netty-codec", + "io.netty:netty-common", + "io.netty:netty-transport" + ], + "io.netty:netty-handler": [ + "io.netty:netty-buffer", + "io.netty:netty-codec", + "io.netty:netty-common", + "io.netty:netty-resolver", + "io.netty:netty-transport", + "io.netty:netty-transport-native-unix-common" + ], + "io.netty:netty-handler-proxy": [ + "io.netty:netty-buffer", + "io.netty:netty-codec", + "io.netty:netty-codec-http", + "io.netty:netty-codec-socks", + "io.netty:netty-common", + "io.netty:netty-transport" + ], + "io.netty:netty-resolver": [ + "io.netty:netty-common" + ], + "io.netty:netty-resolver-dns": [ + "io.netty:netty-buffer", + "io.netty:netty-codec", + "io.netty:netty-codec-dns", + "io.netty:netty-common", + "io.netty:netty-handler", + "io.netty:netty-resolver", + "io.netty:netty-transport" + ], + "io.netty:netty-transport": [ + "io.netty:netty-buffer", + "io.netty:netty-common", + "io.netty:netty-resolver" + ], + "io.netty:netty-transport-classes-epoll": [ + "io.netty:netty-buffer", + "io.netty:netty-common", + "io.netty:netty-transport", + "io.netty:netty-transport-native-unix-common" + ], + "io.netty:netty-transport-classes-kqueue": [ + "io.netty:netty-buffer", + "io.netty:netty-common", + "io.netty:netty-transport", + "io.netty:netty-transport-native-unix-common" + ], + "io.netty:netty-transport-native-epoll": [ + "io.netty:netty-buffer", + "io.netty:netty-common", + "io.netty:netty-transport", + "io.netty:netty-transport-classes-epoll", + "io.netty:netty-transport-native-unix-common" + ], + "io.netty:netty-transport-native-epoll:jar:linux-x86_64": [ + "io.netty:netty-buffer", + "io.netty:netty-common", + "io.netty:netty-transport", + "io.netty:netty-transport-classes-epoll", + "io.netty:netty-transport-native-unix-common" + ], + "io.netty:netty-transport-native-kqueue": [ + "io.netty:netty-buffer", + "io.netty:netty-common", + "io.netty:netty-transport", + "io.netty:netty-transport-classes-kqueue", + "io.netty:netty-transport-native-unix-common" + ], + "io.netty:netty-transport-native-kqueue:jar:osx-x86_64": [ + "io.netty:netty-buffer", + "io.netty:netty-common", + "io.netty:netty-transport", + "io.netty:netty-transport-classes-kqueue", + "io.netty:netty-transport-native-unix-common" + ], + "io.netty:netty-transport-native-unix-common": [ + "io.netty:netty-buffer", + "io.netty:netty-common", + "io.netty:netty-transport" + ], + "io.opencensus:opencensus-api": [ + "io.grpc:grpc-context" + ], + "io.opencensus:opencensus-contrib-http-util": [ + "com.google.guava:guava", + "io.opencensus:opencensus-api" + ], + "io.projectreactor:reactor-core": [ + "org.reactivestreams:reactive-streams" + ], + "io.prometheus:simpleclient": [ + "io.prometheus:simpleclient_tracer_otel", + "io.prometheus:simpleclient_tracer_otel_agent" + ], + "io.prometheus:simpleclient_common": [ + "io.prometheus:simpleclient" + ], + "io.prometheus:simpleclient_hotspot": [ + "io.prometheus:simpleclient" + ], + "io.prometheus:simpleclient_httpserver": [ + "io.prometheus:simpleclient", + "io.prometheus:simpleclient_common" 
+        ],
+        "io.prometheus:simpleclient_tracer_otel": [
+            "io.prometheus:simpleclient_tracer_common"
+        ],
+        "io.prometheus:simpleclient_tracer_otel_agent": [
+            "io.prometheus:simpleclient_tracer_common"
+        ],
+        "io.reactivex.rxjava3:rxjava": [
+            "org.reactivestreams:reactive-streams"
+        ],
+        "jakarta.xml.bind:jakarta.xml.bind-api": [
+            "jakarta.activation:jakarta.activation-api"
+        ],
+        "junit:junit": [
+            "org.hamcrest:hamcrest-core"
+        ],
+        "me.dinowernli:java-grpc-prometheus": [
+            "com.google.code.findbugs:jsr305",
+            "com.google.guava:failureaccess",
+            "com.google.guava:guava",
+            "com.google.j2objc:j2objc-annotations",
+            "io.prometheus:simpleclient"
+        ],
+        "net.java.dev.jna:jna-platform": [
+            "net.java.dev.jna:jna"
+        ],
+        "net.javacrumbs.future-converter:future-converter-guava-common": [
+            "com.google.guava:guava",
+            "net.javacrumbs.future-converter:future-converter-common"
+        ],
+        "net.javacrumbs.future-converter:future-converter-java8-common": [
+            "net.javacrumbs.future-converter:future-converter-common"
+        ],
+        "net.javacrumbs.future-converter:future-converter-java8-guava": [
+            "net.javacrumbs.future-converter:future-converter-common",
+            "net.javacrumbs.future-converter:future-converter-guava-common",
+            "net.javacrumbs.future-converter:future-converter-java8-common"
+        ],
+        "net.minidev:accessors-smart": [
+            "org.ow2.asm:asm"
+        ],
+        "net.minidev:json-smart": [
+            "net.minidev:accessors-smart"
+        ],
+        "org.apache.httpcomponents:httpclient": [
+            "commons-codec:commons-codec",
+            "commons-logging:commons-logging",
+            "org.apache.httpcomponents:httpcore"
+        ],
+        "org.bouncycastle:bcpkix-jdk18on": [
+            "org.bouncycastle:bcprov-jdk18on",
+            "org.bouncycastle:bcutil-jdk18on"
+        ],
+        "org.bouncycastle:bcutil-jdk18on": [
+            "org.bouncycastle:bcprov-jdk18on"
+        ],
+        "org.glassfish.hk2:hk2-api": [
+            "org.glassfish.hk2.external:aopalliance-repackaged",
+            "org.glassfish.hk2.external:jakarta.inject",
+            "org.glassfish.hk2:hk2-utils"
+        ],
+        "org.glassfish.hk2:hk2-locator": [
+            "org.glassfish.hk2.external:aopalliance-repackaged",
+            "org.glassfish.hk2.external:jakarta.inject",
+            "org.glassfish.hk2:hk2-api",
+            "org.glassfish.hk2:hk2-utils"
+        ],
+        "org.glassfish.hk2:hk2-utils": [
+            "org.glassfish.hk2.external:jakarta.inject"
+        ],
+        "org.glassfish.jersey.connectors:jersey-apache-connector": [
+            "jakarta.ws.rs:jakarta.ws.rs-api",
+            "org.apache.httpcomponents:httpclient",
+            "org.glassfish.jersey.core:jersey-client",
+            "org.glassfish.jersey.core:jersey-common"
+        ],
+        "org.glassfish.jersey.core:jersey-client": [
+            "jakarta.ws.rs:jakarta.ws.rs-api",
+            "org.glassfish.hk2.external:jakarta.inject",
+            "org.glassfish.jersey.core:jersey-common"
+        ],
+        "org.glassfish.jersey.core:jersey-common": [
+            "com.sun.activation:jakarta.activation",
+            "jakarta.annotation:jakarta.annotation-api",
+            "jakarta.ws.rs:jakarta.ws.rs-api",
+            "org.glassfish.hk2.external:jakarta.inject",
+            "org.glassfish.hk2:osgi-resource-locator"
+        ],
+        "org.glassfish.jersey.inject:jersey-hk2": [
+            "org.glassfish.hk2:hk2-locator",
+            "org.glassfish.jersey.core:jersey-common",
+            "org.javassist:javassist"
+        ],
+        "org.jboss.marshalling:jboss-marshalling-river": [
+            "org.jboss.marshalling:jboss-marshalling"
+        ],
+        "org.jodd:jodd-bean": [
+            "org.jodd:jodd-core"
+        ],
+        "org.mockito:mockito-core": [
+            "net.bytebuddy:byte-buddy",
+            "net.bytebuddy:byte-buddy-agent",
+            "org.objenesis:objenesis"
+        ],
+        "org.openjdk.jmh:jmh-core": [
+            "net.sf.jopt-simple:jopt-simple",
+            "org.apache.commons:commons-math3"
+        ],
+        "org.openjdk.jmh:jmh-generator-annprocess": [
+            "org.openjdk.jmh:jmh-core"
+        ],
+        "org.ow2.asm:asm-analysis": [
+            "org.ow2.asm:asm-tree"
+        ],
+        "org.ow2.asm:asm-commons": [
+            "org.ow2.asm:asm",
+            "org.ow2.asm:asm-analysis",
+            "org.ow2.asm:asm-tree"
+        ],
+        "org.ow2.asm:asm-tree": [
+            "org.ow2.asm:asm"
+        ],
+        "org.ow2.asm:asm-util": [
+            "org.ow2.asm:asm",
+            "org.ow2.asm:asm-analysis",
+            "org.ow2.asm:asm-tree"
+        ],
+        "org.redisson:redisson": [
+            "com.esotericsoftware:kryo",
+            "com.fasterxml.jackson.core:jackson-annotations",
+            "com.fasterxml.jackson.core:jackson-core",
+            "com.fasterxml.jackson.core:jackson-databind",
+            "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml",
+            "io.netty:netty-buffer",
+            "io.netty:netty-codec",
+            "io.netty:netty-common",
+            "io.netty:netty-handler",
+            "io.netty:netty-resolver",
+            "io.netty:netty-resolver-dns",
+            "io.netty:netty-transport",
+            "io.projectreactor:reactor-core",
+            "io.reactivex.rxjava3:rxjava",
+            "javax.cache:cache-api",
+            "net.bytebuddy:byte-buddy",
+            "org.jboss.marshalling:jboss-marshalling",
+            "org.jboss.marshalling:jboss-marshalling-river",
+            "org.jodd:jodd-bean",
+            "org.reactivestreams:reactive-streams",
+            "org.slf4j:slf4j-api"
+        ],
+        "org.reflections:reflections": [
+            "com.google.code.findbugs:jsr305",
+            "org.javassist:javassist",
+            "org.slf4j:slf4j-api"
+        ],
+        "org.slf4j:jcl-over-slf4j": [
+            "org.slf4j:slf4j-api"
+        ],
+        "org.slf4j:slf4j-simple": [
+            "org.slf4j:slf4j-api"
+        ],
+        "redis.clients:jedis": [
+            "com.google.code.gson:gson",
+            "org.apache.commons:commons-pool2",
+            "org.json:json",
+            "org.slf4j:slf4j-api"
+        ]
+    },
+    "packages": {
+        "aopalliance:aopalliance": [
+            "org.aopalliance.aop",
+            "org.aopalliance.intercept"
+        ],
+        "com.amazonaws:aws-java-sdk-core": [
+            "com.amazonaws",
+            "com.amazonaws.adapters.types",
+            "com.amazonaws.annotation",
+            "com.amazonaws.arn",
+            "com.amazonaws.auth",
+            "com.amazonaws.auth.internal",
+            "com.amazonaws.auth.policy",
+            "com.amazonaws.auth.policy.conditions",
+            "com.amazonaws.auth.policy.internal",
+            "com.amazonaws.auth.presign",
+            "com.amazonaws.auth.profile",
+            "com.amazonaws.auth.profile.internal",
+            "com.amazonaws.auth.profile.internal.securitytoken",
+            "com.amazonaws.cache",
+            "com.amazonaws.client",
+            "com.amazonaws.client.builder",
+            "com.amazonaws.endpointdiscovery",
+            "com.amazonaws.event",
+            "com.amazonaws.event.request",
+            "com.amazonaws.handlers",
+            "com.amazonaws.http",
+            "com.amazonaws.http.apache",
+            "com.amazonaws.http.apache.client.impl",
+            "com.amazonaws.http.apache.request.impl",
+            "com.amazonaws.http.apache.utils",
+            "com.amazonaws.http.client",
+            "com.amazonaws.http.conn",
+            "com.amazonaws.http.conn.ssl",
+            "com.amazonaws.http.conn.ssl.privileged",
+            "com.amazonaws.http.exception",
+            "com.amazonaws.http.impl.client",
+            "com.amazonaws.http.protocol",
+            "com.amazonaws.http.request",
+            "com.amazonaws.http.response",
+            "com.amazonaws.http.settings",
+            "com.amazonaws.http.timers",
+            "com.amazonaws.http.timers.client",
+            "com.amazonaws.http.timers.request",
+            "com.amazonaws.internal",
+            "com.amazonaws.internal.auth",
+            "com.amazonaws.internal.config",
+            "com.amazonaws.internal.http",
+            "com.amazonaws.jmx",
+            "com.amazonaws.jmx.spi",
+            "com.amazonaws.log",
+            "com.amazonaws.metrics",
+            "com.amazonaws.metrics.internal",
+            "com.amazonaws.monitoring",
+            "com.amazonaws.monitoring.internal",
+            "com.amazonaws.partitions",
+            "com.amazonaws.partitions.model",
+            "com.amazonaws.profile.path",
+            "com.amazonaws.profile.path.config",
+            "com.amazonaws.profile.path.cred",
+            "com.amazonaws.protocol",
+            "com.amazonaws.protocol.json",
+            "com.amazonaws.protocol.json.internal",
"com.amazonaws.regions", + "com.amazonaws.retry", + "com.amazonaws.retry.internal", + "com.amazonaws.retry.v2", + "com.amazonaws.transform", + "com.amazonaws.util", + "com.amazonaws.util.endpoint", + "com.amazonaws.util.json", + "com.amazonaws.waiters" + ], + "com.amazonaws:aws-java-sdk-kms": [ + "com.amazonaws.auth.policy.actions", + "com.amazonaws.services.kms", + "com.amazonaws.services.kms.model", + "com.amazonaws.services.kms.model.transform" + ], + "com.amazonaws:aws-java-sdk-s3": [ + "com.amazonaws.auth", + "com.amazonaws.auth.policy.actions", + "com.amazonaws.auth.policy.conditions", + "com.amazonaws.auth.policy.resources", + "com.amazonaws.services.s3", + "com.amazonaws.services.s3.event", + "com.amazonaws.services.s3.internal", + "com.amazonaws.services.s3.internal.auth", + "com.amazonaws.services.s3.internal.crypto", + "com.amazonaws.services.s3.internal.crypto.keywrap", + "com.amazonaws.services.s3.internal.crypto.v1", + "com.amazonaws.services.s3.internal.crypto.v2", + "com.amazonaws.services.s3.internal.eventstreaming", + "com.amazonaws.services.s3.iterable", + "com.amazonaws.services.s3.metrics", + "com.amazonaws.services.s3.model", + "com.amazonaws.services.s3.model.analytics", + "com.amazonaws.services.s3.model.intelligenttiering", + "com.amazonaws.services.s3.model.inventory", + "com.amazonaws.services.s3.model.lifecycle", + "com.amazonaws.services.s3.model.metrics", + "com.amazonaws.services.s3.model.ownership", + "com.amazonaws.services.s3.model.replication", + "com.amazonaws.services.s3.model.transform", + "com.amazonaws.services.s3.request", + "com.amazonaws.services.s3.transfer", + "com.amazonaws.services.s3.transfer.exception", + "com.amazonaws.services.s3.transfer.internal", + "com.amazonaws.services.s3.transfer.internal.future", + "com.amazonaws.services.s3.transfer.model", + "com.amazonaws.services.s3.waiters" + ], + "com.amazonaws:aws-java-sdk-secretsmanager": [ + "com.amazonaws.services.secretsmanager", + "com.amazonaws.services.secretsmanager.model", + "com.amazonaws.services.secretsmanager.model.transform" + ], + "com.amazonaws:jmespath-java": [ + "com.amazonaws.jmespath" + ], + "com.esotericsoftware:kryo": [ + "com.esotericsoftware.kryo", + "com.esotericsoftware.kryo.io", + "com.esotericsoftware.kryo.serializers", + "com.esotericsoftware.kryo.unsafe", + "com.esotericsoftware.kryo.util" + ], + "com.esotericsoftware:minlog": [ + "com.esotericsoftware.minlog" + ], + "com.esotericsoftware:reflectasm": [ + "com.esotericsoftware.asm", + "com.esotericsoftware.reflectasm" + ], + "com.fasterxml.jackson.core:jackson-annotations": [ + "com.fasterxml.jackson.annotation" + ], + "com.fasterxml.jackson.core:jackson-core": [ + "com.fasterxml.jackson.core", + "com.fasterxml.jackson.core.async", + "com.fasterxml.jackson.core.base", + "com.fasterxml.jackson.core.exc", + "com.fasterxml.jackson.core.filter", + "com.fasterxml.jackson.core.format", + "com.fasterxml.jackson.core.io", + "com.fasterxml.jackson.core.io.doubleparser", + "com.fasterxml.jackson.core.io.schubfach", + "com.fasterxml.jackson.core.json", + "com.fasterxml.jackson.core.json.async", + "com.fasterxml.jackson.core.sym", + "com.fasterxml.jackson.core.type", + "com.fasterxml.jackson.core.util" + ], + "com.fasterxml.jackson.core:jackson-databind": [ + "com.fasterxml.jackson.databind", + "com.fasterxml.jackson.databind.annotation", + "com.fasterxml.jackson.databind.cfg", + "com.fasterxml.jackson.databind.deser", + "com.fasterxml.jackson.databind.deser.impl", + "com.fasterxml.jackson.databind.deser.std", + 
"com.fasterxml.jackson.databind.exc", + "com.fasterxml.jackson.databind.ext", + "com.fasterxml.jackson.databind.introspect", + "com.fasterxml.jackson.databind.jdk14", + "com.fasterxml.jackson.databind.json", + "com.fasterxml.jackson.databind.jsonFormatVisitors", + "com.fasterxml.jackson.databind.jsonschema", + "com.fasterxml.jackson.databind.jsontype", + "com.fasterxml.jackson.databind.jsontype.impl", + "com.fasterxml.jackson.databind.module", + "com.fasterxml.jackson.databind.node", + "com.fasterxml.jackson.databind.ser", + "com.fasterxml.jackson.databind.ser.impl", + "com.fasterxml.jackson.databind.ser.std", + "com.fasterxml.jackson.databind.type", + "com.fasterxml.jackson.databind.util", + "com.fasterxml.jackson.databind.util.internal" + ], + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor": [ + "com.fasterxml.jackson.dataformat.cbor", + "com.fasterxml.jackson.dataformat.cbor.databind" + ], + "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml": [ + "com.fasterxml.jackson.dataformat.yaml", + "com.fasterxml.jackson.dataformat.yaml.snakeyaml.error", + "com.fasterxml.jackson.dataformat.yaml.util" + ], + "com.fasterxml.jackson.jaxrs:jackson-jaxrs-base": [ + "com.fasterxml.jackson.jaxrs.annotation", + "com.fasterxml.jackson.jaxrs.base", + "com.fasterxml.jackson.jaxrs.base.nocontent", + "com.fasterxml.jackson.jaxrs.cfg", + "com.fasterxml.jackson.jaxrs.util" + ], + "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider": [ + "com.fasterxml.jackson.jaxrs.json", + "com.fasterxml.jackson.jaxrs.json.annotation" + ], + "com.fasterxml.jackson.module:jackson-module-jaxb-annotations": [ + "com.fasterxml.jackson.module.jaxb", + "com.fasterxml.jackson.module.jaxb.deser", + "com.fasterxml.jackson.module.jaxb.ser" + ], + "com.github.ben-manes.caffeine:caffeine": [ + "com.github.benmanes.caffeine.cache", + "com.github.benmanes.caffeine.cache.stats" + ], + "com.github.docker-java:docker-java": [ + "com.github.dockerjava.core" + ], + "com.github.docker-java:docker-java-api": [ + "com.github.dockerjava.api", + "com.github.dockerjava.api.async", + "com.github.dockerjava.api.command", + "com.github.dockerjava.api.exception", + "com.github.dockerjava.api.model" + ], + "com.github.docker-java:docker-java-core": [ + "com.github.dockerjava.core", + "com.github.dockerjava.core.async", + "com.github.dockerjava.core.command", + "com.github.dockerjava.core.dockerfile", + "com.github.dockerjava.core.exception", + "com.github.dockerjava.core.exec", + "com.github.dockerjava.core.util" + ], + "com.github.docker-java:docker-java-transport": [ + "com.github.dockerjava.transport" + ], + "com.github.docker-java:docker-java-transport-jersey": [ + "com.github.dockerjava.jaxrs", + "com.github.dockerjava.jaxrs.filter", + "com.github.dockerjava.jaxrs.util" + ], + "com.github.docker-java:docker-java-transport-netty": [ + "com.github.dockerjava.netty", + "com.github.dockerjava.netty.handler" + ], + "com.github.fppt:jedis-mock": [ + "com.github.fppt.jedismock", + "com.github.fppt.jedismock.commands", + "com.github.fppt.jedismock.datastructures", + "com.github.fppt.jedismock.exception", + "com.github.fppt.jedismock.operations", + "com.github.fppt.jedismock.operations.bitmaps", + "com.github.fppt.jedismock.operations.cluster", + "com.github.fppt.jedismock.operations.connection", + "com.github.fppt.jedismock.operations.hashes", + "com.github.fppt.jedismock.operations.hyperloglog", + "com.github.fppt.jedismock.operations.keys", + "com.github.fppt.jedismock.operations.lists", + 
"com.github.fppt.jedismock.operations.pubsub", + "com.github.fppt.jedismock.operations.scripting", + "com.github.fppt.jedismock.operations.server", + "com.github.fppt.jedismock.operations.sets", + "com.github.fppt.jedismock.operations.sortedsets", + "com.github.fppt.jedismock.operations.strings", + "com.github.fppt.jedismock.operations.transactions", + "com.github.fppt.jedismock.server", + "com.github.fppt.jedismock.storage" + ], + "com.github.jnr:jffi": [ + "com.kenai.jffi", + "com.kenai.jffi.internal" + ], + "com.github.jnr:jnr-a64asm": [ + "jnr.a64asm" + ], + "com.github.jnr:jnr-constants": [ + "jnr.constants", + "jnr.constants.platform", + "jnr.constants.platform.aix", + "jnr.constants.platform.darwin", + "jnr.constants.platform.dragonflybsd", + "jnr.constants.platform.fake", + "jnr.constants.platform.freebsd", + "jnr.constants.platform.freebsd.aarch64", + "jnr.constants.platform.linux", + "jnr.constants.platform.linux.aarch64", + "jnr.constants.platform.linux.loongarch64", + "jnr.constants.platform.linux.mips64el", + "jnr.constants.platform.linux.powerpc64", + "jnr.constants.platform.linux.s390x", + "jnr.constants.platform.openbsd", + "jnr.constants.platform.solaris", + "jnr.constants.platform.windows" + ], + "com.github.jnr:jnr-ffi": [ + "jnr.ffi", + "jnr.ffi.annotations", + "jnr.ffi.byref", + "jnr.ffi.mapper", + "jnr.ffi.provider", + "jnr.ffi.provider.converters", + "jnr.ffi.provider.jffi", + "jnr.ffi.provider.jffi.platform.aarch64.darwin", + "jnr.ffi.provider.jffi.platform.aarch64.freebsd", + "jnr.ffi.provider.jffi.platform.aarch64.linux", + "jnr.ffi.provider.jffi.platform.arm.linux", + "jnr.ffi.provider.jffi.platform.i386.darwin", + "jnr.ffi.provider.jffi.platform.i386.freebsd", + "jnr.ffi.provider.jffi.platform.i386.linux", + "jnr.ffi.provider.jffi.platform.i386.midnightbsd", + "jnr.ffi.provider.jffi.platform.i386.openbsd", + "jnr.ffi.provider.jffi.platform.i386.solaris", + "jnr.ffi.provider.jffi.platform.i386.windows", + "jnr.ffi.provider.jffi.platform.loongarch64.linux", + "jnr.ffi.provider.jffi.platform.mips.linux", + "jnr.ffi.provider.jffi.platform.mips64.linux", + "jnr.ffi.provider.jffi.platform.mips64el.linux", + "jnr.ffi.provider.jffi.platform.mipsel.linux", + "jnr.ffi.provider.jffi.platform.ppc.aix", + "jnr.ffi.provider.jffi.platform.ppc.darwin", + "jnr.ffi.provider.jffi.platform.ppc.linux", + "jnr.ffi.provider.jffi.platform.ppc64.aix", + "jnr.ffi.provider.jffi.platform.ppc64.freebsd", + "jnr.ffi.provider.jffi.platform.ppc64.ibmi", + "jnr.ffi.provider.jffi.platform.ppc64.linux", + "jnr.ffi.provider.jffi.platform.ppc64le.linux", + "jnr.ffi.provider.jffi.platform.s390.linux", + "jnr.ffi.provider.jffi.platform.s390x.linux", + "jnr.ffi.provider.jffi.platform.sparc.solaris", + "jnr.ffi.provider.jffi.platform.sparcv9.linux", + "jnr.ffi.provider.jffi.platform.sparcv9.solaris", + "jnr.ffi.provider.jffi.platform.x86_64.darwin", + "jnr.ffi.provider.jffi.platform.x86_64.dragonfly", + "jnr.ffi.provider.jffi.platform.x86_64.freebsd", + "jnr.ffi.provider.jffi.platform.x86_64.linux", + "jnr.ffi.provider.jffi.platform.x86_64.midnightbsd", + "jnr.ffi.provider.jffi.platform.x86_64.openbsd", + "jnr.ffi.provider.jffi.platform.x86_64.solaris", + "jnr.ffi.provider.jffi.platform.x86_64.windows", + "jnr.ffi.types", + "jnr.ffi.util", + "jnr.ffi.util.ref", + "jnr.ffi.util.ref.internal" + ], + "com.github.jnr:jnr-posix": [ + "jnr.posix", + "jnr.posix.util", + "jnr.posix.windows" + ], + "com.github.jnr:jnr-x86asm": [ + "com.kenai.jnr.x86asm", + "jnr.x86asm" + ], + 
"com.github.kevinstern:software-and-algorithms": [ + "blogspot.software_and_algorithms.stern_library.data_structure", + "blogspot.software_and_algorithms.stern_library.geometry", + "blogspot.software_and_algorithms.stern_library.optimization", + "blogspot.software_and_algorithms.stern_library.string" + ], + "com.github.luben:zstd-jni": [ + "com.github.luben.zstd", + "com.github.luben.zstd.util" + ], + "com.github.oshi:oshi-core": [ + "oshi", + "oshi.annotation.concurrent", + "oshi.driver.linux", + "oshi.driver.linux.proc", + "oshi.driver.mac", + "oshi.driver.mac.disk", + "oshi.driver.mac.net", + "oshi.driver.unix", + "oshi.driver.unix.aix", + "oshi.driver.unix.aix.perfstat", + "oshi.driver.unix.freebsd", + "oshi.driver.unix.freebsd.disk", + "oshi.driver.unix.openbsd.disk", + "oshi.driver.unix.solaris", + "oshi.driver.unix.solaris.disk", + "oshi.driver.unix.solaris.kstat", + "oshi.driver.windows", + "oshi.driver.windows.perfmon", + "oshi.driver.windows.registry", + "oshi.driver.windows.wmi", + "oshi.hardware", + "oshi.hardware.common", + "oshi.hardware.platform.linux", + "oshi.hardware.platform.mac", + "oshi.hardware.platform.unix", + "oshi.hardware.platform.unix.aix", + "oshi.hardware.platform.unix.freebsd", + "oshi.hardware.platform.unix.openbsd", + "oshi.hardware.platform.unix.solaris", + "oshi.hardware.platform.windows", + "oshi.jna", + "oshi.jna.platform.linux", + "oshi.jna.platform.mac", + "oshi.jna.platform.unix", + "oshi.jna.platform.windows", + "oshi.software.common", + "oshi.software.os", + "oshi.software.os.linux", + "oshi.software.os.mac", + "oshi.software.os.unix.aix", + "oshi.software.os.unix.freebsd", + "oshi.software.os.unix.openbsd", + "oshi.software.os.unix.solaris", + "oshi.software.os.windows", + "oshi.util", + "oshi.util.platform.linux", + "oshi.util.platform.mac", + "oshi.util.platform.unix.freebsd", + "oshi.util.platform.unix.openbsd", + "oshi.util.platform.unix.solaris", + "oshi.util.platform.windows", + "oshi.util.tuples" + ], + "com.github.pcj:google-options": [ + "com.google.devtools.common.options" + ], + "com.github.serceman:jnr-fuse": [ + "jnr.ffi", + "jnr.ffi.provider.jffi", + "ru.serce.jnrfuse", + "ru.serce.jnrfuse.examples", + "ru.serce.jnrfuse.flags", + "ru.serce.jnrfuse.struct", + "ru.serce.jnrfuse.utils" + ], + "com.google.android:annotations": [ + "android.annotation" + ], + "com.google.api.grpc:proto-google-common-protos": [ + "com.google.api", + "com.google.cloud", + "com.google.cloud.audit", + "com.google.cloud.location", + "com.google.geo.type", + "com.google.logging.type", + "com.google.longrunning", + "com.google.rpc", + "com.google.rpc.context", + "com.google.type" + ], + "com.google.auth:google-auth-library-credentials": [ + "com.google.auth" + ], + "com.google.auth:google-auth-library-oauth2-http": [ + "com.google.auth.http", + "com.google.auth.oauth2" + ], + "com.google.auto.service:auto-service-annotations": [ + "com.google.auto.service" + ], + "com.google.auto.value:auto-value-annotations": [ + "com.google.auto.value", + "com.google.auto.value.extension.memoized", + "com.google.auto.value.extension.serializable", + "com.google.auto.value.extension.toprettystring" + ], + "com.google.auto:auto-common": [ + "com.google.auto.common" + ], + "com.google.code.findbugs:jsr305": [ + "javax.annotation", + "javax.annotation.concurrent", + "javax.annotation.meta" + ], + "com.google.code.gson:gson": [ + "com.google.gson", + "com.google.gson.annotations", + "com.google.gson.internal", + "com.google.gson.internal.bind", + 
"com.google.gson.internal.bind.util", + "com.google.gson.internal.reflect", + "com.google.gson.internal.sql", + "com.google.gson.reflect", + "com.google.gson.stream" + ], + "com.google.errorprone:error_prone_annotation": [ + "com.google.errorprone" + ], + "com.google.errorprone:error_prone_annotations": [ + "com.google.errorprone.annotations", + "com.google.errorprone.annotations.concurrent" + ], + "com.google.errorprone:error_prone_check_api": [ + "com.google.errorprone", + "com.google.errorprone.apply", + "com.google.errorprone.bugpatterns", + "com.google.errorprone.dataflow", + "com.google.errorprone.dataflow.nullnesspropagation", + "com.google.errorprone.dataflow.nullnesspropagation.inference", + "com.google.errorprone.fixes", + "com.google.errorprone.matchers", + "com.google.errorprone.matchers.method", + "com.google.errorprone.names", + "com.google.errorprone.predicates", + "com.google.errorprone.predicates.type", + "com.google.errorprone.scanner", + "com.google.errorprone.suppliers", + "com.google.errorprone.util" + ], + "com.google.errorprone:error_prone_core": [ + "com.google.errorprone", + "com.google.errorprone.bugpatterns", + "com.google.errorprone.bugpatterns.android", + "com.google.errorprone.bugpatterns.apidiff", + "com.google.errorprone.bugpatterns.argumentselectiondefects", + "com.google.errorprone.bugpatterns.checkreturnvalue", + "com.google.errorprone.bugpatterns.collectionincompatibletype", + "com.google.errorprone.bugpatterns.flogger", + "com.google.errorprone.bugpatterns.formatstring", + "com.google.errorprone.bugpatterns.inject", + "com.google.errorprone.bugpatterns.inject.dagger", + "com.google.errorprone.bugpatterns.inject.guice", + "com.google.errorprone.bugpatterns.inlineme", + "com.google.errorprone.bugpatterns.javadoc", + "com.google.errorprone.bugpatterns.nullness", + "com.google.errorprone.bugpatterns.overloading", + "com.google.errorprone.bugpatterns.threadsafety", + "com.google.errorprone.bugpatterns.time", + "com.google.errorprone.refaster", + "com.google.errorprone.refaster.annotation", + "com.google.errorprone.scanner" + ], + "com.google.errorprone:error_prone_type_annotations": [ + "com.google.errorprone.annotations" + ], + "com.google.guava:failureaccess": [ + "com.google.common.util.concurrent.internal" + ], + "com.google.guava:guava": [ + "com.google.common.annotations", + "com.google.common.base", + "com.google.common.base.internal", + "com.google.common.cache", + "com.google.common.collect", + "com.google.common.escape", + "com.google.common.eventbus", + "com.google.common.graph", + "com.google.common.hash", + "com.google.common.html", + "com.google.common.io", + "com.google.common.math", + "com.google.common.net", + "com.google.common.primitives", + "com.google.common.reflect", + "com.google.common.util.concurrent", + "com.google.common.xml", + "com.google.thirdparty.publicsuffix" + ], + "com.google.guava:guava-testlib": [ + "com.google.common.collect.testing", + "com.google.common.collect.testing.features", + "com.google.common.collect.testing.google", + "com.google.common.collect.testing.testers", + "com.google.common.escape.testing", + "com.google.common.testing", + "com.google.common.util.concurrent.testing" + ], + "com.google.http-client:google-http-client": [ + "com.google.api.client.http", + "com.google.api.client.http.apache", + "com.google.api.client.http.javanet", + "com.google.api.client.http.json", + "com.google.api.client.json", + "com.google.api.client.json.rpc2", + "com.google.api.client.json.webtoken", + 
"com.google.api.client.testing.http", + "com.google.api.client.testing.http.apache", + "com.google.api.client.testing.http.javanet", + "com.google.api.client.testing.json", + "com.google.api.client.testing.json.webtoken", + "com.google.api.client.testing.util", + "com.google.api.client.util", + "com.google.api.client.util.escape", + "com.google.api.client.util.store" + ], + "com.google.http-client:google-http-client-gson": [ + "com.google.api.client.json.gson" + ], + "com.google.inject:guice": [ + "com.google.inject", + "com.google.inject.binder", + "com.google.inject.internal", + "com.google.inject.internal.aop", + "com.google.inject.internal.asm", + "com.google.inject.internal.util", + "com.google.inject.matcher", + "com.google.inject.multibindings", + "com.google.inject.name", + "com.google.inject.spi", + "com.google.inject.util" + ], + "com.google.j2objc:j2objc-annotations": [ + "com.google.j2objc.annotations" + ], + "com.google.jimfs:jimfs": [ + "com.google.common.jimfs" + ], + "com.google.protobuf:protobuf-java": [ + "com.google.protobuf", + "com.google.protobuf.compiler" + ], + "com.google.protobuf:protobuf-java-util": [ + "com.google.protobuf.util" + ], + "com.google.truth:truth": [ + "com.google.common.truth" + ], + "com.googlecode.json-simple:json-simple": [ + "org.json.simple", + "org.json.simple.parser" + ], + "com.jayway.jsonpath:json-path": [ + "com.jayway.jsonpath", + "com.jayway.jsonpath.internal", + "com.jayway.jsonpath.internal.filter", + "com.jayway.jsonpath.internal.function", + "com.jayway.jsonpath.internal.function.json", + "com.jayway.jsonpath.internal.function.latebinding", + "com.jayway.jsonpath.internal.function.numeric", + "com.jayway.jsonpath.internal.function.sequence", + "com.jayway.jsonpath.internal.function.text", + "com.jayway.jsonpath.internal.path", + "com.jayway.jsonpath.spi.cache", + "com.jayway.jsonpath.spi.json", + "com.jayway.jsonpath.spi.mapper" + ], + "com.kohlschutter.junixsocket:junixsocket-common": [ + "org.newsclub.net.unix" + ], + "com.kohlschutter.junixsocket:junixsocket-native-common": [ + "org.newsclub.lib.junixsocket.common" + ], + "com.sun.activation:jakarta.activation": [ + "com.sun.activation.registries", + "com.sun.activation.viewers", + "javax.activation" + ], + "commons-codec:commons-codec": [ + "org.apache.commons.codec", + "org.apache.commons.codec.binary", + "org.apache.commons.codec.cli", + "org.apache.commons.codec.digest", + "org.apache.commons.codec.language", + "org.apache.commons.codec.language.bm", + "org.apache.commons.codec.net" + ], + "commons-io:commons-io": [ + "org.apache.commons.io", + "org.apache.commons.io.comparator", + "org.apache.commons.io.filefilter", + "org.apache.commons.io.input", + "org.apache.commons.io.monitor", + "org.apache.commons.io.output", + "org.apache.commons.io.serialization" + ], + "commons-logging:commons-logging": [ + "org.apache.commons.logging", + "org.apache.commons.logging.impl" + ], + "io.github.eisop:dataflow-errorprone": [ + "org.checkerframework.dataflow.qual", + "org.checkerframework.errorprone.checker.builder.qual", + "org.checkerframework.errorprone.checker.calledmethods.qual", + "org.checkerframework.errorprone.checker.compilermsgs.qual", + "org.checkerframework.errorprone.checker.fenum.qual", + "org.checkerframework.errorprone.checker.formatter.qual", + "org.checkerframework.errorprone.checker.guieffect.qual", + "org.checkerframework.errorprone.checker.i18n.qual", + "org.checkerframework.errorprone.checker.i18nformatter.qual", + 
"org.checkerframework.errorprone.checker.index.qual", + "org.checkerframework.errorprone.checker.initialization.qual", + "org.checkerframework.errorprone.checker.interning.qual", + "org.checkerframework.errorprone.checker.lock.qual", + "org.checkerframework.errorprone.checker.mustcall.qual", + "org.checkerframework.errorprone.checker.nullness.qual", + "org.checkerframework.errorprone.checker.optional.qual", + "org.checkerframework.errorprone.checker.propkey.qual", + "org.checkerframework.errorprone.checker.regex.qual", + "org.checkerframework.errorprone.checker.signature.qual", + "org.checkerframework.errorprone.checker.signedness.qual", + "org.checkerframework.errorprone.checker.tainting.qual", + "org.checkerframework.errorprone.checker.units.qual", + "org.checkerframework.errorprone.common.aliasing.qual", + "org.checkerframework.errorprone.common.initializedfields.qual", + "org.checkerframework.errorprone.common.reflection.qual", + "org.checkerframework.errorprone.common.returnsreceiver.qual", + "org.checkerframework.errorprone.common.subtyping.qual", + "org.checkerframework.errorprone.common.util.report.qual", + "org.checkerframework.errorprone.common.value.qual", + "org.checkerframework.errorprone.dataflow.analysis", + "org.checkerframework.errorprone.dataflow.busyexpr", + "org.checkerframework.errorprone.dataflow.cfg", + "org.checkerframework.errorprone.dataflow.cfg.block", + "org.checkerframework.errorprone.dataflow.cfg.builder", + "org.checkerframework.errorprone.dataflow.cfg.node", + "org.checkerframework.errorprone.dataflow.cfg.playground", + "org.checkerframework.errorprone.dataflow.cfg.visualize", + "org.checkerframework.errorprone.dataflow.constantpropagation", + "org.checkerframework.errorprone.dataflow.expression", + "org.checkerframework.errorprone.dataflow.livevariable", + "org.checkerframework.errorprone.dataflow.reachingdef", + "org.checkerframework.errorprone.dataflow.util", + "org.checkerframework.errorprone.framework.qual", + "org.checkerframework.errorprone.javacutil", + "org.checkerframework.errorprone.javacutil.trees", + "org.checkerframework.errorprone.org.plumelib.reflection", + "org.checkerframework.errorprone.org.plumelib.util" + ], + "io.github.java-diff-utils:java-diff-utils": [ + "com.github.difflib", + "com.github.difflib.algorithm", + "com.github.difflib.algorithm.myers", + "com.github.difflib.patch", + "com.github.difflib.text", + "com.github.difflib.unifieddiff" + ], + "io.grpc:grpc-api": [ + "io.grpc" + ], + "io.grpc:grpc-auth": [ + "io.grpc.auth" + ], + "io.grpc:grpc-core": [ + "io.grpc.internal" + ], + "io.grpc:grpc-inprocess": [ + "io.grpc.inprocess" + ], + "io.grpc:grpc-netty": [ + "io.grpc.netty" + ], + "io.grpc:grpc-netty-shaded": [ + "io.grpc.netty.shaded.io.grpc.netty", + "io.grpc.netty.shaded.io.netty.bootstrap", + "io.grpc.netty.shaded.io.netty.buffer", + "io.grpc.netty.shaded.io.netty.buffer.search", + "io.grpc.netty.shaded.io.netty.channel", + "io.grpc.netty.shaded.io.netty.channel.embedded", + "io.grpc.netty.shaded.io.netty.channel.epoll", + "io.grpc.netty.shaded.io.netty.channel.group", + "io.grpc.netty.shaded.io.netty.channel.internal", + "io.grpc.netty.shaded.io.netty.channel.local", + "io.grpc.netty.shaded.io.netty.channel.nio", + "io.grpc.netty.shaded.io.netty.channel.oio", + "io.grpc.netty.shaded.io.netty.channel.pool", + "io.grpc.netty.shaded.io.netty.channel.socket", + "io.grpc.netty.shaded.io.netty.channel.socket.nio", + "io.grpc.netty.shaded.io.netty.channel.socket.oio", + "io.grpc.netty.shaded.io.netty.channel.unix", + 
"io.grpc.netty.shaded.io.netty.handler.address", + "io.grpc.netty.shaded.io.netty.handler.codec", + "io.grpc.netty.shaded.io.netty.handler.codec.base64", + "io.grpc.netty.shaded.io.netty.handler.codec.bytes", + "io.grpc.netty.shaded.io.netty.handler.codec.compression", + "io.grpc.netty.shaded.io.netty.handler.codec.http", + "io.grpc.netty.shaded.io.netty.handler.codec.http.cookie", + "io.grpc.netty.shaded.io.netty.handler.codec.http.cors", + "io.grpc.netty.shaded.io.netty.handler.codec.http.multipart", + "io.grpc.netty.shaded.io.netty.handler.codec.http.websocketx", + "io.grpc.netty.shaded.io.netty.handler.codec.http.websocketx.extensions", + "io.grpc.netty.shaded.io.netty.handler.codec.http.websocketx.extensions.compression", + "io.grpc.netty.shaded.io.netty.handler.codec.http2", + "io.grpc.netty.shaded.io.netty.handler.codec.json", + "io.grpc.netty.shaded.io.netty.handler.codec.marshalling", + "io.grpc.netty.shaded.io.netty.handler.codec.protobuf", + "io.grpc.netty.shaded.io.netty.handler.codec.rtsp", + "io.grpc.netty.shaded.io.netty.handler.codec.serialization", + "io.grpc.netty.shaded.io.netty.handler.codec.socks", + "io.grpc.netty.shaded.io.netty.handler.codec.socksx", + "io.grpc.netty.shaded.io.netty.handler.codec.socksx.v4", + "io.grpc.netty.shaded.io.netty.handler.codec.socksx.v5", + "io.grpc.netty.shaded.io.netty.handler.codec.spdy", + "io.grpc.netty.shaded.io.netty.handler.codec.string", + "io.grpc.netty.shaded.io.netty.handler.codec.xml", + "io.grpc.netty.shaded.io.netty.handler.flow", + "io.grpc.netty.shaded.io.netty.handler.flush", + "io.grpc.netty.shaded.io.netty.handler.ipfilter", + "io.grpc.netty.shaded.io.netty.handler.logging", + "io.grpc.netty.shaded.io.netty.handler.pcap", + "io.grpc.netty.shaded.io.netty.handler.proxy", + "io.grpc.netty.shaded.io.netty.handler.ssl", + "io.grpc.netty.shaded.io.netty.handler.ssl.ocsp", + "io.grpc.netty.shaded.io.netty.handler.ssl.util", + "io.grpc.netty.shaded.io.netty.handler.stream", + "io.grpc.netty.shaded.io.netty.handler.timeout", + "io.grpc.netty.shaded.io.netty.handler.traffic", + "io.grpc.netty.shaded.io.netty.internal.tcnative", + "io.grpc.netty.shaded.io.netty.resolver", + "io.grpc.netty.shaded.io.netty.util", + "io.grpc.netty.shaded.io.netty.util.collection", + "io.grpc.netty.shaded.io.netty.util.concurrent", + "io.grpc.netty.shaded.io.netty.util.internal", + "io.grpc.netty.shaded.io.netty.util.internal.logging", + "io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues", + "io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.atomic", + "io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.util", + "io.grpc.netty.shaded.io.netty.util.internal.svm" + ], + "io.grpc:grpc-protobuf": [ + "io.grpc.protobuf" + ], + "io.grpc:grpc-protobuf-lite": [ + "io.grpc.protobuf.lite" + ], + "io.grpc:grpc-services": [ + "io.grpc.binarylog.v1", + "io.grpc.channelz.v1", + "io.grpc.health.v1", + "io.grpc.protobuf.services", + "io.grpc.protobuf.services.internal", + "io.grpc.reflection.v1alpha", + "io.grpc.services" + ], + "io.grpc:grpc-stub": [ + "io.grpc.stub", + "io.grpc.stub.annotations" + ], + "io.grpc:grpc-testing": [ + "io.grpc.internal.testing", + "io.grpc.testing" + ], + "io.grpc:grpc-util": [ + "io.grpc.util" + ], + "io.netty:netty-buffer": [ + "io.netty.buffer", + "io.netty.buffer.search" + ], + "io.netty:netty-codec": [ + "io.netty.handler.codec", + "io.netty.handler.codec.base64", + "io.netty.handler.codec.bytes", + "io.netty.handler.codec.compression", + "io.netty.handler.codec.json", + 
"io.netty.handler.codec.marshalling", + "io.netty.handler.codec.protobuf", + "io.netty.handler.codec.serialization", + "io.netty.handler.codec.string", + "io.netty.handler.codec.xml" + ], + "io.netty:netty-codec-dns": [ + "io.netty.handler.codec.dns" + ], + "io.netty:netty-codec-http": [ + "io.netty.handler.codec.http", + "io.netty.handler.codec.http.cookie", + "io.netty.handler.codec.http.cors", + "io.netty.handler.codec.http.multipart", + "io.netty.handler.codec.http.websocketx", + "io.netty.handler.codec.http.websocketx.extensions", + "io.netty.handler.codec.http.websocketx.extensions.compression", + "io.netty.handler.codec.rtsp", + "io.netty.handler.codec.spdy" + ], + "io.netty:netty-codec-http2": [ + "io.netty.handler.codec.http2" + ], + "io.netty:netty-codec-socks": [ + "io.netty.handler.codec.socks", + "io.netty.handler.codec.socksx", + "io.netty.handler.codec.socksx.v4", + "io.netty.handler.codec.socksx.v5" + ], + "io.netty:netty-common": [ + "io.netty.util", + "io.netty.util.collection", + "io.netty.util.concurrent", + "io.netty.util.internal", + "io.netty.util.internal.logging", + "io.netty.util.internal.shaded.org.jctools.queues", + "io.netty.util.internal.shaded.org.jctools.queues.atomic", + "io.netty.util.internal.shaded.org.jctools.util", + "io.netty.util.internal.svm" + ], + "io.netty:netty-handler": [ + "io.netty.handler.address", + "io.netty.handler.flow", + "io.netty.handler.flush", + "io.netty.handler.ipfilter", + "io.netty.handler.logging", + "io.netty.handler.pcap", + "io.netty.handler.ssl", + "io.netty.handler.ssl.ocsp", + "io.netty.handler.ssl.util", + "io.netty.handler.stream", + "io.netty.handler.timeout", + "io.netty.handler.traffic" + ], + "io.netty:netty-handler-proxy": [ + "io.netty.handler.proxy" + ], + "io.netty:netty-resolver": [ + "io.netty.resolver" + ], + "io.netty:netty-resolver-dns": [ + "io.netty.resolver.dns" + ], + "io.netty:netty-transport": [ + "io.netty.bootstrap", + "io.netty.channel", + "io.netty.channel.embedded", + "io.netty.channel.group", + "io.netty.channel.internal", + "io.netty.channel.local", + "io.netty.channel.nio", + "io.netty.channel.oio", + "io.netty.channel.pool", + "io.netty.channel.socket", + "io.netty.channel.socket.nio", + "io.netty.channel.socket.oio" + ], + "io.netty:netty-transport-classes-epoll": [ + "io.netty.channel.epoll" + ], + "io.netty:netty-transport-classes-kqueue": [ + "io.netty.channel.kqueue" + ], + "io.netty:netty-transport-native-unix-common": [ + "io.netty.channel.unix" + ], + "io.opencensus:opencensus-api": [ + "io.opencensus.common", + "io.opencensus.internal", + "io.opencensus.metrics", + "io.opencensus.metrics.data", + "io.opencensus.metrics.export", + "io.opencensus.resource", + "io.opencensus.stats", + "io.opencensus.tags", + "io.opencensus.tags.propagation", + "io.opencensus.tags.unsafe", + "io.opencensus.trace", + "io.opencensus.trace.config", + "io.opencensus.trace.export", + "io.opencensus.trace.internal", + "io.opencensus.trace.propagation", + "io.opencensus.trace.samplers", + "io.opencensus.trace.unsafe" + ], + "io.opencensus:opencensus-contrib-http-util": [ + "io.opencensus.contrib.http", + "io.opencensus.contrib.http.util" + ], + "io.perfmark:perfmark-api": [ + "io.perfmark" + ], + "io.projectreactor:reactor-core": [ + "reactor.adapter", + "reactor.core", + "reactor.core.observability", + "reactor.core.publisher", + "reactor.core.scheduler", + "reactor.util", + "reactor.util.annotation", + "reactor.util.concurrent", + "reactor.util.context", + "reactor.util.function", + "reactor.util.retry" + ], 
+ "io.prometheus:simpleclient": [ + "io.prometheus.client", + "io.prometheus.client.exemplars" + ], + "io.prometheus:simpleclient_common": [ + "io.prometheus.client.exporter.common" + ], + "io.prometheus:simpleclient_hotspot": [ + "io.prometheus.client.hotspot" + ], + "io.prometheus:simpleclient_httpserver": [ + "io.prometheus.client.exporter" + ], + "io.prometheus:simpleclient_tracer_common": [ + "io.prometheus.client.exemplars.tracer.common" + ], + "io.prometheus:simpleclient_tracer_otel": [ + "io.prometheus.client.exemplars.tracer.otel" + ], + "io.prometheus:simpleclient_tracer_otel_agent": [ + "io.prometheus.client.exemplars.tracer.otel_agent" + ], + "io.reactivex.rxjava3:rxjava": [ + "io.reactivex.rxjava3.annotations", + "io.reactivex.rxjava3.core", + "io.reactivex.rxjava3.disposables", + "io.reactivex.rxjava3.exceptions", + "io.reactivex.rxjava3.flowables", + "io.reactivex.rxjava3.functions", + "io.reactivex.rxjava3.internal.disposables", + "io.reactivex.rxjava3.internal.functions", + "io.reactivex.rxjava3.internal.fuseable", + "io.reactivex.rxjava3.internal.jdk8", + "io.reactivex.rxjava3.internal.observers", + "io.reactivex.rxjava3.internal.operators.completable", + "io.reactivex.rxjava3.internal.operators.flowable", + "io.reactivex.rxjava3.internal.operators.maybe", + "io.reactivex.rxjava3.internal.operators.mixed", + "io.reactivex.rxjava3.internal.operators.observable", + "io.reactivex.rxjava3.internal.operators.parallel", + "io.reactivex.rxjava3.internal.operators.single", + "io.reactivex.rxjava3.internal.queue", + "io.reactivex.rxjava3.internal.schedulers", + "io.reactivex.rxjava3.internal.subscribers", + "io.reactivex.rxjava3.internal.subscriptions", + "io.reactivex.rxjava3.internal.util", + "io.reactivex.rxjava3.observables", + "io.reactivex.rxjava3.observers", + "io.reactivex.rxjava3.operators", + "io.reactivex.rxjava3.parallel", + "io.reactivex.rxjava3.plugins", + "io.reactivex.rxjava3.processors", + "io.reactivex.rxjava3.schedulers", + "io.reactivex.rxjava3.subjects", + "io.reactivex.rxjava3.subscribers" + ], + "jakarta.activation:jakarta.activation-api": [ + "javax.activation" + ], + "jakarta.annotation:jakarta.annotation-api": [ + "javax.annotation", + "javax.annotation.security", + "javax.annotation.sql" + ], + "jakarta.ws.rs:jakarta.ws.rs-api": [ + "javax.ws.rs", + "javax.ws.rs.client", + "javax.ws.rs.container", + "javax.ws.rs.core", + "javax.ws.rs.ext", + "javax.ws.rs.sse" + ], + "jakarta.xml.bind:jakarta.xml.bind-api": [ + "javax.xml.bind", + "javax.xml.bind.annotation", + "javax.xml.bind.annotation.adapters", + "javax.xml.bind.attachment", + "javax.xml.bind.helpers", + "javax.xml.bind.util" + ], + "javax.annotation:javax.annotation-api": [ + "javax.annotation", + "javax.annotation.security", + "javax.annotation.sql" + ], + "javax.cache:cache-api": [ + "javax.cache", + "javax.cache.annotation", + "javax.cache.configuration", + "javax.cache.event", + "javax.cache.expiry", + "javax.cache.integration", + "javax.cache.management", + "javax.cache.processor", + "javax.cache.spi" + ], + "javax.inject:javax.inject": [ + "javax.inject" + ], + "joda-time:joda-time": [ + "org.joda.time", + "org.joda.time.base", + "org.joda.time.chrono", + "org.joda.time.convert", + "org.joda.time.field", + "org.joda.time.format", + "org.joda.time.tz" + ], + "junit:junit": [ + "junit.extensions", + "junit.framework", + "junit.runner", + "junit.textui", + "org.junit", + "org.junit.experimental", + "org.junit.experimental.categories", + "org.junit.experimental.max", + 
"org.junit.experimental.results", + "org.junit.experimental.runners", + "org.junit.experimental.theories", + "org.junit.experimental.theories.internal", + "org.junit.experimental.theories.suppliers", + "org.junit.function", + "org.junit.internal", + "org.junit.internal.builders", + "org.junit.internal.management", + "org.junit.internal.matchers", + "org.junit.internal.requests", + "org.junit.internal.runners", + "org.junit.internal.runners.model", + "org.junit.internal.runners.rules", + "org.junit.internal.runners.statements", + "org.junit.matchers", + "org.junit.rules", + "org.junit.runner", + "org.junit.runner.manipulation", + "org.junit.runner.notification", + "org.junit.runners", + "org.junit.runners.model", + "org.junit.runners.parameterized", + "org.junit.validator" + ], + "me.dinowernli:java-grpc-prometheus": [ + "me.dinowernli.grpc.prometheus" + ], + "net.bytebuddy:byte-buddy": [ + "net.bytebuddy", + "net.bytebuddy.agent.builder", + "net.bytebuddy.asm", + "net.bytebuddy.build", + "net.bytebuddy.description", + "net.bytebuddy.description.annotation", + "net.bytebuddy.description.enumeration", + "net.bytebuddy.description.field", + "net.bytebuddy.description.method", + "net.bytebuddy.description.modifier", + "net.bytebuddy.description.type", + "net.bytebuddy.dynamic", + "net.bytebuddy.dynamic.loading", + "net.bytebuddy.dynamic.scaffold", + "net.bytebuddy.dynamic.scaffold.inline", + "net.bytebuddy.dynamic.scaffold.subclass", + "net.bytebuddy.implementation", + "net.bytebuddy.implementation.attribute", + "net.bytebuddy.implementation.auxiliary", + "net.bytebuddy.implementation.bind", + "net.bytebuddy.implementation.bind.annotation", + "net.bytebuddy.implementation.bytecode", + "net.bytebuddy.implementation.bytecode.assign", + "net.bytebuddy.implementation.bytecode.assign.primitive", + "net.bytebuddy.implementation.bytecode.assign.reference", + "net.bytebuddy.implementation.bytecode.collection", + "net.bytebuddy.implementation.bytecode.constant", + "net.bytebuddy.implementation.bytecode.member", + "net.bytebuddy.jar.asm", + "net.bytebuddy.jar.asm.commons", + "net.bytebuddy.jar.asm.signature", + "net.bytebuddy.matcher", + "net.bytebuddy.pool", + "net.bytebuddy.utility", + "net.bytebuddy.utility.dispatcher", + "net.bytebuddy.utility.nullability", + "net.bytebuddy.utility.privilege", + "net.bytebuddy.utility.visitor" + ], + "net.bytebuddy:byte-buddy-agent": [ + "net.bytebuddy.agent", + "net.bytebuddy.agent.utility.nullability" + ], + "net.java.dev.jna:jna": [ + "com.sun.jna", + "com.sun.jna.internal", + "com.sun.jna.ptr", + "com.sun.jna.win32" + ], + "net.java.dev.jna:jna-platform": [ + "com.sun.jna.platform", + "com.sun.jna.platform.dnd", + "com.sun.jna.platform.linux", + "com.sun.jna.platform.mac", + "com.sun.jna.platform.unix", + "com.sun.jna.platform.unix.aix", + "com.sun.jna.platform.unix.solaris", + "com.sun.jna.platform.win32", + "com.sun.jna.platform.win32.COM", + "com.sun.jna.platform.win32.COM.tlb", + "com.sun.jna.platform.win32.COM.tlb.imp", + "com.sun.jna.platform.win32.COM.util", + "com.sun.jna.platform.win32.COM.util.annotation", + "com.sun.jna.platform.wince" + ], + "net.javacrumbs.future-converter:future-converter-common": [ + "net.javacrumbs.futureconverter.common.internal" + ], + "net.javacrumbs.future-converter:future-converter-guava-common": [ + "net.javacrumbs.futureconverter.guavacommon" + ], + "net.javacrumbs.future-converter:future-converter-java8-common": [ + "net.javacrumbs.futureconverter.java8common" + ], + 
"net.javacrumbs.future-converter:future-converter-java8-guava": [ + "net.javacrumbs.futureconverter.java8guava" + ], + "net.jcip:jcip-annotations": [ + "net.jcip.annotations" + ], + "net.minidev:accessors-smart": [ + "net.minidev.asm", + "net.minidev.asm.ex" + ], + "net.minidev:json-smart": [ + "net.minidev.json", + "net.minidev.json.annotate", + "net.minidev.json.parser", + "net.minidev.json.reader", + "net.minidev.json.writer" + ], + "net.sf.jopt-simple:jopt-simple": [ + "joptsimple", + "joptsimple.internal", + "joptsimple.util" + ], + "org.apache.commons:commons-compress": [ + "org.apache.commons.compress", + "org.apache.commons.compress.archivers", + "org.apache.commons.compress.archivers.ar", + "org.apache.commons.compress.archivers.arj", + "org.apache.commons.compress.archivers.cpio", + "org.apache.commons.compress.archivers.dump", + "org.apache.commons.compress.archivers.examples", + "org.apache.commons.compress.archivers.jar", + "org.apache.commons.compress.archivers.sevenz", + "org.apache.commons.compress.archivers.tar", + "org.apache.commons.compress.archivers.zip", + "org.apache.commons.compress.changes", + "org.apache.commons.compress.compressors", + "org.apache.commons.compress.compressors.brotli", + "org.apache.commons.compress.compressors.bzip2", + "org.apache.commons.compress.compressors.deflate", + "org.apache.commons.compress.compressors.deflate64", + "org.apache.commons.compress.compressors.gzip", + "org.apache.commons.compress.compressors.lz4", + "org.apache.commons.compress.compressors.lz77support", + "org.apache.commons.compress.compressors.lzma", + "org.apache.commons.compress.compressors.lzw", + "org.apache.commons.compress.compressors.pack200", + "org.apache.commons.compress.compressors.snappy", + "org.apache.commons.compress.compressors.xz", + "org.apache.commons.compress.compressors.z", + "org.apache.commons.compress.compressors.zstandard", + "org.apache.commons.compress.harmony", + "org.apache.commons.compress.harmony.archive.internal.nls", + "org.apache.commons.compress.harmony.pack200", + "org.apache.commons.compress.harmony.unpack200", + "org.apache.commons.compress.harmony.unpack200.bytecode", + "org.apache.commons.compress.harmony.unpack200.bytecode.forms", + "org.apache.commons.compress.java.util.jar", + "org.apache.commons.compress.parallel", + "org.apache.commons.compress.utils" + ], + "org.apache.commons:commons-lang3": [ + "org.apache.commons.lang3", + "org.apache.commons.lang3.arch", + "org.apache.commons.lang3.builder", + "org.apache.commons.lang3.compare", + "org.apache.commons.lang3.concurrent", + "org.apache.commons.lang3.concurrent.locks", + "org.apache.commons.lang3.event", + "org.apache.commons.lang3.exception", + "org.apache.commons.lang3.function", + "org.apache.commons.lang3.math", + "org.apache.commons.lang3.mutable", + "org.apache.commons.lang3.reflect", + "org.apache.commons.lang3.stream", + "org.apache.commons.lang3.text", + "org.apache.commons.lang3.text.translate", + "org.apache.commons.lang3.time", + "org.apache.commons.lang3.tuple", + "org.apache.commons.lang3.util" + ], + "org.apache.commons:commons-math3": [ + "org.apache.commons.math3", + "org.apache.commons.math3.analysis", + "org.apache.commons.math3.analysis.differentiation", + "org.apache.commons.math3.analysis.function", + "org.apache.commons.math3.analysis.integration", + "org.apache.commons.math3.analysis.integration.gauss", + "org.apache.commons.math3.analysis.interpolation", + "org.apache.commons.math3.analysis.polynomials", + "org.apache.commons.math3.analysis.solvers", 
+ "org.apache.commons.math3.complex", + "org.apache.commons.math3.dfp", + "org.apache.commons.math3.distribution", + "org.apache.commons.math3.distribution.fitting", + "org.apache.commons.math3.exception", + "org.apache.commons.math3.exception.util", + "org.apache.commons.math3.filter", + "org.apache.commons.math3.fitting", + "org.apache.commons.math3.fitting.leastsquares", + "org.apache.commons.math3.fraction", + "org.apache.commons.math3.genetics", + "org.apache.commons.math3.geometry", + "org.apache.commons.math3.geometry.enclosing", + "org.apache.commons.math3.geometry.euclidean.oned", + "org.apache.commons.math3.geometry.euclidean.threed", + "org.apache.commons.math3.geometry.euclidean.twod", + "org.apache.commons.math3.geometry.euclidean.twod.hull", + "org.apache.commons.math3.geometry.hull", + "org.apache.commons.math3.geometry.partitioning", + "org.apache.commons.math3.geometry.partitioning.utilities", + "org.apache.commons.math3.geometry.spherical.oned", + "org.apache.commons.math3.geometry.spherical.twod", + "org.apache.commons.math3.linear", + "org.apache.commons.math3.ml.clustering", + "org.apache.commons.math3.ml.clustering.evaluation", + "org.apache.commons.math3.ml.distance", + "org.apache.commons.math3.ml.neuralnet", + "org.apache.commons.math3.ml.neuralnet.oned", + "org.apache.commons.math3.ml.neuralnet.sofm", + "org.apache.commons.math3.ml.neuralnet.sofm.util", + "org.apache.commons.math3.ml.neuralnet.twod", + "org.apache.commons.math3.ml.neuralnet.twod.util", + "org.apache.commons.math3.ode", + "org.apache.commons.math3.ode.events", + "org.apache.commons.math3.ode.nonstiff", + "org.apache.commons.math3.ode.sampling", + "org.apache.commons.math3.optim", + "org.apache.commons.math3.optim.linear", + "org.apache.commons.math3.optim.nonlinear.scalar", + "org.apache.commons.math3.optim.nonlinear.scalar.gradient", + "org.apache.commons.math3.optim.nonlinear.scalar.noderiv", + "org.apache.commons.math3.optim.nonlinear.vector", + "org.apache.commons.math3.optim.nonlinear.vector.jacobian", + "org.apache.commons.math3.optim.univariate", + "org.apache.commons.math3.optimization", + "org.apache.commons.math3.optimization.direct", + "org.apache.commons.math3.optimization.fitting", + "org.apache.commons.math3.optimization.general", + "org.apache.commons.math3.optimization.linear", + "org.apache.commons.math3.optimization.univariate", + "org.apache.commons.math3.primes", + "org.apache.commons.math3.random", + "org.apache.commons.math3.special", + "org.apache.commons.math3.stat", + "org.apache.commons.math3.stat.clustering", + "org.apache.commons.math3.stat.correlation", + "org.apache.commons.math3.stat.descriptive", + "org.apache.commons.math3.stat.descriptive.moment", + "org.apache.commons.math3.stat.descriptive.rank", + "org.apache.commons.math3.stat.descriptive.summary", + "org.apache.commons.math3.stat.inference", + "org.apache.commons.math3.stat.interval", + "org.apache.commons.math3.stat.ranking", + "org.apache.commons.math3.stat.regression", + "org.apache.commons.math3.transform", + "org.apache.commons.math3.util" + ], + "org.apache.commons:commons-pool2": [ + "org.apache.commons.pool2", + "org.apache.commons.pool2.impl", + "org.apache.commons.pool2.proxy" + ], + "org.apache.httpcomponents:httpclient": [ + "org.apache.http.auth", + "org.apache.http.auth.params", + "org.apache.http.client", + "org.apache.http.client.config", + "org.apache.http.client.entity", + "org.apache.http.client.methods", + "org.apache.http.client.params", + "org.apache.http.client.protocol", + 
"org.apache.http.client.utils", + "org.apache.http.conn", + "org.apache.http.conn.params", + "org.apache.http.conn.routing", + "org.apache.http.conn.scheme", + "org.apache.http.conn.socket", + "org.apache.http.conn.ssl", + "org.apache.http.conn.util", + "org.apache.http.cookie", + "org.apache.http.cookie.params", + "org.apache.http.impl.auth", + "org.apache.http.impl.client", + "org.apache.http.impl.conn", + "org.apache.http.impl.conn.tsccm", + "org.apache.http.impl.cookie", + "org.apache.http.impl.execchain" + ], + "org.apache.httpcomponents:httpcore": [ + "org.apache.http", + "org.apache.http.annotation", + "org.apache.http.concurrent", + "org.apache.http.config", + "org.apache.http.entity", + "org.apache.http.impl", + "org.apache.http.impl.bootstrap", + "org.apache.http.impl.entity", + "org.apache.http.impl.io", + "org.apache.http.impl.pool", + "org.apache.http.io", + "org.apache.http.message", + "org.apache.http.params", + "org.apache.http.pool", + "org.apache.http.protocol", + "org.apache.http.ssl", + "org.apache.http.util" + ], + "org.apache.tomcat:annotations-api": [ + "javax.annotation", + "javax.annotation.security", + "javax.ejb", + "javax.persistence", + "javax.xml.ws" + ], + "org.bouncycastle:bcpkix-jdk18on": [ + "org.bouncycastle.cert", + "org.bouncycastle.cert.bc", + "org.bouncycastle.cert.cmp", + "org.bouncycastle.cert.crmf", + "org.bouncycastle.cert.crmf.bc", + "org.bouncycastle.cert.crmf.jcajce", + "org.bouncycastle.cert.dane", + "org.bouncycastle.cert.dane.fetcher", + "org.bouncycastle.cert.jcajce", + "org.bouncycastle.cert.ocsp", + "org.bouncycastle.cert.ocsp.jcajce", + "org.bouncycastle.cert.path", + "org.bouncycastle.cert.path.validations", + "org.bouncycastle.cert.selector", + "org.bouncycastle.cert.selector.jcajce", + "org.bouncycastle.cmc", + "org.bouncycastle.cms", + "org.bouncycastle.cms.bc", + "org.bouncycastle.cms.jcajce", + "org.bouncycastle.dvcs", + "org.bouncycastle.eac", + "org.bouncycastle.eac.jcajce", + "org.bouncycastle.eac.operator", + "org.bouncycastle.eac.operator.jcajce", + "org.bouncycastle.est", + "org.bouncycastle.est.jcajce", + "org.bouncycastle.its", + "org.bouncycastle.its.bc", + "org.bouncycastle.its.jcajce", + "org.bouncycastle.its.operator", + "org.bouncycastle.mime", + "org.bouncycastle.mime.encoding", + "org.bouncycastle.mime.smime", + "org.bouncycastle.mozilla", + "org.bouncycastle.mozilla.jcajce", + "org.bouncycastle.openssl", + "org.bouncycastle.openssl.bc", + "org.bouncycastle.openssl.jcajce", + "org.bouncycastle.operator", + "org.bouncycastle.operator.bc", + "org.bouncycastle.operator.jcajce", + "org.bouncycastle.pkcs", + "org.bouncycastle.pkcs.bc", + "org.bouncycastle.pkcs.jcajce", + "org.bouncycastle.pkix", + "org.bouncycastle.pkix.jcajce", + "org.bouncycastle.pkix.util", + "org.bouncycastle.pkix.util.filter", + "org.bouncycastle.tsp", + "org.bouncycastle.tsp.cms", + "org.bouncycastle.tsp.ers", + "org.bouncycastle.voms" + ], + "org.bouncycastle:bcprov-jdk15on": [ + "org.bouncycastle", + "org.bouncycastle.asn1", + "org.bouncycastle.asn1.anssi", + "org.bouncycastle.asn1.bc", + "org.bouncycastle.asn1.cryptlib", + "org.bouncycastle.asn1.cryptopro", + "org.bouncycastle.asn1.edec", + "org.bouncycastle.asn1.gm", + "org.bouncycastle.asn1.gnu", + "org.bouncycastle.asn1.iana", + "org.bouncycastle.asn1.isara", + "org.bouncycastle.asn1.iso", + "org.bouncycastle.asn1.kisa", + "org.bouncycastle.asn1.microsoft", + "org.bouncycastle.asn1.misc", + "org.bouncycastle.asn1.mozilla", + "org.bouncycastle.asn1.nist", + "org.bouncycastle.asn1.nsri", + 
"org.bouncycastle.asn1.ntt", + "org.bouncycastle.asn1.ocsp", + "org.bouncycastle.asn1.oiw", + "org.bouncycastle.asn1.pkcs", + "org.bouncycastle.asn1.rosstandart", + "org.bouncycastle.asn1.sec", + "org.bouncycastle.asn1.teletrust", + "org.bouncycastle.asn1.ua", + "org.bouncycastle.asn1.util", + "org.bouncycastle.asn1.x500", + "org.bouncycastle.asn1.x500.style", + "org.bouncycastle.asn1.x509", + "org.bouncycastle.asn1.x509.qualified", + "org.bouncycastle.asn1.x509.sigi", + "org.bouncycastle.asn1.x9", + "org.bouncycastle.crypto", + "org.bouncycastle.crypto.agreement", + "org.bouncycastle.crypto.agreement.jpake", + "org.bouncycastle.crypto.agreement.kdf", + "org.bouncycastle.crypto.agreement.srp", + "org.bouncycastle.crypto.commitments", + "org.bouncycastle.crypto.digests", + "org.bouncycastle.crypto.ec", + "org.bouncycastle.crypto.encodings", + "org.bouncycastle.crypto.engines", + "org.bouncycastle.crypto.examples", + "org.bouncycastle.crypto.fpe", + "org.bouncycastle.crypto.generators", + "org.bouncycastle.crypto.io", + "org.bouncycastle.crypto.kems", + "org.bouncycastle.crypto.macs", + "org.bouncycastle.crypto.modes", + "org.bouncycastle.crypto.modes.gcm", + "org.bouncycastle.crypto.modes.kgcm", + "org.bouncycastle.crypto.paddings", + "org.bouncycastle.crypto.params", + "org.bouncycastle.crypto.parsers", + "org.bouncycastle.crypto.prng", + "org.bouncycastle.crypto.prng.drbg", + "org.bouncycastle.crypto.signers", + "org.bouncycastle.crypto.util", + "org.bouncycastle.i18n", + "org.bouncycastle.i18n.filter", + "org.bouncycastle.iana", + "org.bouncycastle.internal.asn1.bsi", + "org.bouncycastle.internal.asn1.cms", + "org.bouncycastle.internal.asn1.eac", + "org.bouncycastle.internal.asn1.isismtt", + "org.bouncycastle.jcajce", + "org.bouncycastle.jcajce.interfaces", + "org.bouncycastle.jcajce.io", + "org.bouncycastle.jcajce.provider.asymmetric", + "org.bouncycastle.jcajce.provider.asymmetric.dh", + "org.bouncycastle.jcajce.provider.asymmetric.dsa", + "org.bouncycastle.jcajce.provider.asymmetric.dstu", + "org.bouncycastle.jcajce.provider.asymmetric.ec", + "org.bouncycastle.jcajce.provider.asymmetric.ecgost", + "org.bouncycastle.jcajce.provider.asymmetric.ecgost12", + "org.bouncycastle.jcajce.provider.asymmetric.edec", + "org.bouncycastle.jcajce.provider.asymmetric.elgamal", + "org.bouncycastle.jcajce.provider.asymmetric.gost", + "org.bouncycastle.jcajce.provider.asymmetric.ies", + "org.bouncycastle.jcajce.provider.asymmetric.rsa", + "org.bouncycastle.jcajce.provider.asymmetric.util", + "org.bouncycastle.jcajce.provider.asymmetric.x509", + "org.bouncycastle.jcajce.provider.config", + "org.bouncycastle.jcajce.provider.digest", + "org.bouncycastle.jcajce.provider.drbg", + "org.bouncycastle.jcajce.provider.keystore", + "org.bouncycastle.jcajce.provider.keystore.bc", + "org.bouncycastle.jcajce.provider.keystore.bcfks", + "org.bouncycastle.jcajce.provider.keystore.pkcs12", + "org.bouncycastle.jcajce.provider.keystore.util", + "org.bouncycastle.jcajce.provider.symmetric", + "org.bouncycastle.jcajce.provider.symmetric.util", + "org.bouncycastle.jcajce.provider.util", + "org.bouncycastle.jcajce.spec", + "org.bouncycastle.jcajce.util", + "org.bouncycastle.jce", + "org.bouncycastle.jce.exception", + "org.bouncycastle.jce.interfaces", + "org.bouncycastle.jce.netscape", + "org.bouncycastle.jce.provider", + "org.bouncycastle.jce.spec", + "org.bouncycastle.math", + "org.bouncycastle.math.ec", + "org.bouncycastle.math.ec.custom.djb", + "org.bouncycastle.math.ec.custom.gm", + "org.bouncycastle.math.ec.custom.sec", 
+ "org.bouncycastle.math.ec.endo", + "org.bouncycastle.math.ec.rfc7748", + "org.bouncycastle.math.ec.rfc8032", + "org.bouncycastle.math.ec.tools", + "org.bouncycastle.math.field", + "org.bouncycastle.math.raw", + "org.bouncycastle.pqc.asn1", + "org.bouncycastle.pqc.crypto", + "org.bouncycastle.pqc.crypto.gmss", + "org.bouncycastle.pqc.crypto.gmss.util", + "org.bouncycastle.pqc.crypto.lms", + "org.bouncycastle.pqc.crypto.mceliece", + "org.bouncycastle.pqc.crypto.newhope", + "org.bouncycastle.pqc.crypto.qtesla", + "org.bouncycastle.pqc.crypto.rainbow", + "org.bouncycastle.pqc.crypto.rainbow.util", + "org.bouncycastle.pqc.crypto.sphincs", + "org.bouncycastle.pqc.crypto.sphincsplus", + "org.bouncycastle.pqc.crypto.util", + "org.bouncycastle.pqc.crypto.xmss", + "org.bouncycastle.pqc.jcajce.interfaces", + "org.bouncycastle.pqc.jcajce.provider", + "org.bouncycastle.pqc.jcajce.provider.gmss", + "org.bouncycastle.pqc.jcajce.provider.lms", + "org.bouncycastle.pqc.jcajce.provider.mceliece", + "org.bouncycastle.pqc.jcajce.provider.newhope", + "org.bouncycastle.pqc.jcajce.provider.qtesla", + "org.bouncycastle.pqc.jcajce.provider.rainbow", + "org.bouncycastle.pqc.jcajce.provider.sphincs", + "org.bouncycastle.pqc.jcajce.provider.util", + "org.bouncycastle.pqc.jcajce.provider.xmss", + "org.bouncycastle.pqc.jcajce.spec", + "org.bouncycastle.pqc.math.linearalgebra", + "org.bouncycastle.util", + "org.bouncycastle.util.encoders", + "org.bouncycastle.util.io", + "org.bouncycastle.util.io.pem", + "org.bouncycastle.util.test", + "org.bouncycastle.x509", + "org.bouncycastle.x509.extension", + "org.bouncycastle.x509.util" + ], + "org.bouncycastle:bcprov-jdk18on": [ + "org.bouncycastle", + "org.bouncycastle.asn1", + "org.bouncycastle.asn1.anssi", + "org.bouncycastle.asn1.bc", + "org.bouncycastle.asn1.cryptlib", + "org.bouncycastle.asn1.cryptopro", + "org.bouncycastle.asn1.edec", + "org.bouncycastle.asn1.gm", + "org.bouncycastle.asn1.gnu", + "org.bouncycastle.asn1.iana", + "org.bouncycastle.asn1.isara", + "org.bouncycastle.asn1.iso", + "org.bouncycastle.asn1.kisa", + "org.bouncycastle.asn1.microsoft", + "org.bouncycastle.asn1.misc", + "org.bouncycastle.asn1.mozilla", + "org.bouncycastle.asn1.nist", + "org.bouncycastle.asn1.nsri", + "org.bouncycastle.asn1.ntt", + "org.bouncycastle.asn1.ocsp", + "org.bouncycastle.asn1.oiw", + "org.bouncycastle.asn1.pkcs", + "org.bouncycastle.asn1.rosstandart", + "org.bouncycastle.asn1.sec", + "org.bouncycastle.asn1.teletrust", + "org.bouncycastle.asn1.ua", + "org.bouncycastle.asn1.util", + "org.bouncycastle.asn1.x500", + "org.bouncycastle.asn1.x500.style", + "org.bouncycastle.asn1.x509", + "org.bouncycastle.asn1.x509.qualified", + "org.bouncycastle.asn1.x509.sigi", + "org.bouncycastle.asn1.x9", + "org.bouncycastle.crypto", + "org.bouncycastle.crypto.agreement", + "org.bouncycastle.crypto.agreement.jpake", + "org.bouncycastle.crypto.agreement.kdf", + "org.bouncycastle.crypto.agreement.srp", + "org.bouncycastle.crypto.commitments", + "org.bouncycastle.crypto.constraints", + "org.bouncycastle.crypto.digests", + "org.bouncycastle.crypto.ec", + "org.bouncycastle.crypto.encodings", + "org.bouncycastle.crypto.engines", + "org.bouncycastle.crypto.examples", + "org.bouncycastle.crypto.fpe", + "org.bouncycastle.crypto.generators", + "org.bouncycastle.crypto.hpke", + "org.bouncycastle.crypto.io", + "org.bouncycastle.crypto.kems", + "org.bouncycastle.crypto.macs", + "org.bouncycastle.crypto.modes", + "org.bouncycastle.crypto.modes.gcm", + "org.bouncycastle.crypto.modes.kgcm", + 
"org.bouncycastle.crypto.paddings", + "org.bouncycastle.crypto.params", + "org.bouncycastle.crypto.parsers", + "org.bouncycastle.crypto.prng", + "org.bouncycastle.crypto.prng.drbg", + "org.bouncycastle.crypto.signers", + "org.bouncycastle.crypto.util", + "org.bouncycastle.i18n", + "org.bouncycastle.i18n.filter", + "org.bouncycastle.iana", + "org.bouncycastle.internal.asn1.bsi", + "org.bouncycastle.internal.asn1.cms", + "org.bouncycastle.internal.asn1.eac", + "org.bouncycastle.internal.asn1.isismtt", + "org.bouncycastle.jcajce", + "org.bouncycastle.jcajce.interfaces", + "org.bouncycastle.jcajce.io", + "org.bouncycastle.jcajce.provider.asymmetric", + "org.bouncycastle.jcajce.provider.asymmetric.dh", + "org.bouncycastle.jcajce.provider.asymmetric.dsa", + "org.bouncycastle.jcajce.provider.asymmetric.dstu", + "org.bouncycastle.jcajce.provider.asymmetric.ec", + "org.bouncycastle.jcajce.provider.asymmetric.ecgost", + "org.bouncycastle.jcajce.provider.asymmetric.ecgost12", + "org.bouncycastle.jcajce.provider.asymmetric.edec", + "org.bouncycastle.jcajce.provider.asymmetric.elgamal", + "org.bouncycastle.jcajce.provider.asymmetric.gost", + "org.bouncycastle.jcajce.provider.asymmetric.ies", + "org.bouncycastle.jcajce.provider.asymmetric.rsa", + "org.bouncycastle.jcajce.provider.asymmetric.util", + "org.bouncycastle.jcajce.provider.asymmetric.x509", + "org.bouncycastle.jcajce.provider.config", + "org.bouncycastle.jcajce.provider.digest", + "org.bouncycastle.jcajce.provider.drbg", + "org.bouncycastle.jcajce.provider.keystore", + "org.bouncycastle.jcajce.provider.keystore.bc", + "org.bouncycastle.jcajce.provider.keystore.bcfks", + "org.bouncycastle.jcajce.provider.keystore.pkcs12", + "org.bouncycastle.jcajce.provider.keystore.util", + "org.bouncycastle.jcajce.provider.symmetric", + "org.bouncycastle.jcajce.provider.symmetric.util", + "org.bouncycastle.jcajce.provider.util", + "org.bouncycastle.jcajce.spec", + "org.bouncycastle.jcajce.util", + "org.bouncycastle.jce", + "org.bouncycastle.jce.exception", + "org.bouncycastle.jce.interfaces", + "org.bouncycastle.jce.netscape", + "org.bouncycastle.jce.provider", + "org.bouncycastle.jce.spec", + "org.bouncycastle.math", + "org.bouncycastle.math.ec", + "org.bouncycastle.math.ec.custom.djb", + "org.bouncycastle.math.ec.custom.gm", + "org.bouncycastle.math.ec.custom.sec", + "org.bouncycastle.math.ec.endo", + "org.bouncycastle.math.ec.rfc7748", + "org.bouncycastle.math.ec.rfc8032", + "org.bouncycastle.math.ec.tools", + "org.bouncycastle.math.field", + "org.bouncycastle.math.raw", + "org.bouncycastle.pqc.asn1", + "org.bouncycastle.pqc.crypto", + "org.bouncycastle.pqc.crypto.bike", + "org.bouncycastle.pqc.crypto.cmce", + "org.bouncycastle.pqc.crypto.crystals.dilithium", + "org.bouncycastle.pqc.crypto.crystals.kyber", + "org.bouncycastle.pqc.crypto.falcon", + "org.bouncycastle.pqc.crypto.frodo", + "org.bouncycastle.pqc.crypto.gemss", + "org.bouncycastle.pqc.crypto.hqc", + "org.bouncycastle.pqc.crypto.lms", + "org.bouncycastle.pqc.crypto.newhope", + "org.bouncycastle.pqc.crypto.ntru", + "org.bouncycastle.pqc.crypto.ntruprime", + "org.bouncycastle.pqc.crypto.picnic", + "org.bouncycastle.pqc.crypto.rainbow", + "org.bouncycastle.pqc.crypto.saber", + "org.bouncycastle.pqc.crypto.sphincs", + "org.bouncycastle.pqc.crypto.sphincsplus", + "org.bouncycastle.pqc.crypto.util", + "org.bouncycastle.pqc.crypto.xmss", + "org.bouncycastle.pqc.jcajce.interfaces", + "org.bouncycastle.pqc.jcajce.provider", + "org.bouncycastle.pqc.jcajce.provider.bike", + 
"org.bouncycastle.pqc.jcajce.provider.cmce", + "org.bouncycastle.pqc.jcajce.provider.dilithium", + "org.bouncycastle.pqc.jcajce.provider.falcon", + "org.bouncycastle.pqc.jcajce.provider.frodo", + "org.bouncycastle.pqc.jcajce.provider.gmss", + "org.bouncycastle.pqc.jcajce.provider.hqc", + "org.bouncycastle.pqc.jcajce.provider.kyber", + "org.bouncycastle.pqc.jcajce.provider.lms", + "org.bouncycastle.pqc.jcajce.provider.mceliece", + "org.bouncycastle.pqc.jcajce.provider.newhope", + "org.bouncycastle.pqc.jcajce.provider.ntru", + "org.bouncycastle.pqc.jcajce.provider.ntruprime", + "org.bouncycastle.pqc.jcajce.provider.picnic", + "org.bouncycastle.pqc.jcajce.provider.rainbow", + "org.bouncycastle.pqc.jcajce.provider.saber", + "org.bouncycastle.pqc.jcajce.provider.sphincs", + "org.bouncycastle.pqc.jcajce.provider.sphincsplus", + "org.bouncycastle.pqc.jcajce.provider.util", + "org.bouncycastle.pqc.jcajce.provider.xmss", + "org.bouncycastle.pqc.jcajce.spec", + "org.bouncycastle.pqc.legacy.crypto.gmss", + "org.bouncycastle.pqc.legacy.crypto.gmss.util", + "org.bouncycastle.pqc.legacy.crypto.mceliece", + "org.bouncycastle.pqc.legacy.crypto.qtesla", + "org.bouncycastle.pqc.legacy.crypto.rainbow", + "org.bouncycastle.pqc.legacy.crypto.rainbow.util", + "org.bouncycastle.pqc.legacy.math.linearalgebra", + "org.bouncycastle.pqc.math.ntru", + "org.bouncycastle.pqc.math.ntru.parameters", + "org.bouncycastle.util", + "org.bouncycastle.util.encoders", + "org.bouncycastle.util.io", + "org.bouncycastle.util.io.pem", + "org.bouncycastle.util.test", + "org.bouncycastle.x509", + "org.bouncycastle.x509.extension", + "org.bouncycastle.x509.util" + ], + "org.bouncycastle:bcutil-jdk18on": [ + "org.bouncycastle.asn1.bsi", + "org.bouncycastle.asn1.cmc", + "org.bouncycastle.asn1.cmp", + "org.bouncycastle.asn1.cms", + "org.bouncycastle.asn1.cms.ecc", + "org.bouncycastle.asn1.crmf", + "org.bouncycastle.asn1.dvcs", + "org.bouncycastle.asn1.eac", + "org.bouncycastle.asn1.esf", + "org.bouncycastle.asn1.ess", + "org.bouncycastle.asn1.est", + "org.bouncycastle.asn1.icao", + "org.bouncycastle.asn1.isismtt", + "org.bouncycastle.asn1.isismtt.ocsp", + "org.bouncycastle.asn1.isismtt.x509", + "org.bouncycastle.asn1.smime", + "org.bouncycastle.asn1.tsp", + "org.bouncycastle.oer", + "org.bouncycastle.oer.its", + "org.bouncycastle.oer.its.etsi102941", + "org.bouncycastle.oer.its.etsi102941.basetypes", + "org.bouncycastle.oer.its.etsi103097", + "org.bouncycastle.oer.its.etsi103097.extension", + "org.bouncycastle.oer.its.ieee1609dot2", + "org.bouncycastle.oer.its.ieee1609dot2.basetypes", + "org.bouncycastle.oer.its.ieee1609dot2dot1", + "org.bouncycastle.oer.its.template.etsi102941", + "org.bouncycastle.oer.its.template.etsi102941.basetypes", + "org.bouncycastle.oer.its.template.etsi103097", + "org.bouncycastle.oer.its.template.etsi103097.extension", + "org.bouncycastle.oer.its.template.ieee1609dot2", + "org.bouncycastle.oer.its.template.ieee1609dot2.basetypes", + "org.bouncycastle.oer.its.template.ieee1609dot2dot1" + ], + "org.checkerframework:checker-qual": [ + "org.checkerframework.checker.builder.qual", + "org.checkerframework.checker.calledmethods.qual", + "org.checkerframework.checker.compilermsgs.qual", + "org.checkerframework.checker.fenum.qual", + "org.checkerframework.checker.formatter.qual", + "org.checkerframework.checker.guieffect.qual", + "org.checkerframework.checker.i18n.qual", + "org.checkerframework.checker.i18nformatter.qual", + "org.checkerframework.checker.index.qual", + 
"org.checkerframework.checker.initialization.qual", + "org.checkerframework.checker.interning.qual", + "org.checkerframework.checker.lock.qual", + "org.checkerframework.checker.mustcall.qual", + "org.checkerframework.checker.nullness.qual", + "org.checkerframework.checker.optional.qual", + "org.checkerframework.checker.propkey.qual", + "org.checkerframework.checker.regex.qual", + "org.checkerframework.checker.signature.qual", + "org.checkerframework.checker.signedness.qual", + "org.checkerframework.checker.tainting.qual", + "org.checkerframework.checker.units.qual", + "org.checkerframework.common.aliasing.qual", + "org.checkerframework.common.initializedfields.qual", + "org.checkerframework.common.reflection.qual", + "org.checkerframework.common.returnsreceiver.qual", + "org.checkerframework.common.subtyping.qual", + "org.checkerframework.common.util.report.qual", + "org.checkerframework.common.value.qual", + "org.checkerframework.dataflow.qual", + "org.checkerframework.framework.qual" + ], + "org.codehaus.mojo:animal-sniffer-annotations": [ + "org.codehaus.mojo.animal_sniffer" + ], + "org.glassfish.hk2.external:aopalliance-repackaged": [ + "org.aopalliance.aop", + "org.aopalliance.instrument", + "org.aopalliance.intercept", + "org.aopalliance.reflect" + ], + "org.glassfish.hk2.external:jakarta.inject": [ + "javax.inject" + ], + "org.glassfish.hk2:hk2-api": [ + "org.glassfish.hk2.api", + "org.glassfish.hk2.api.messaging", + "org.glassfish.hk2.extension", + "org.glassfish.hk2.internal", + "org.glassfish.hk2.utilities", + "org.glassfish.hk2.utilities.binding", + "org.jvnet.hk2.annotations" + ], + "org.glassfish.hk2:hk2-locator": [ + "org.jvnet.hk2.external.generator", + "org.jvnet.hk2.external.runtime", + "org.jvnet.hk2.internal" + ], + "org.glassfish.hk2:hk2-utils": [ + "org.glassfish.hk2.utilities.cache", + "org.glassfish.hk2.utilities.cache.internal", + "org.glassfish.hk2.utilities.general", + "org.glassfish.hk2.utilities.general.internal", + "org.glassfish.hk2.utilities.reflection", + "org.glassfish.hk2.utilities.reflection.internal", + "org.jvnet.hk2.component" + ], + "org.glassfish.hk2:osgi-resource-locator": [ + "org.glassfish.hk2.osgiresourcelocator" + ], + "org.glassfish.jersey.connectors:jersey-apache-connector": [ + "org.glassfish.jersey.apache.connector" + ], + "org.glassfish.jersey.core:jersey-client": [ + "org.glassfish.jersey.client", + "org.glassfish.jersey.client.authentication", + "org.glassfish.jersey.client.filter", + "org.glassfish.jersey.client.inject", + "org.glassfish.jersey.client.internal", + "org.glassfish.jersey.client.internal.inject", + "org.glassfish.jersey.client.internal.jdkconnector", + "org.glassfish.jersey.client.internal.routing", + "org.glassfish.jersey.client.spi" + ], + "org.glassfish.jersey.core:jersey-common": [ + "org.glassfish.jersey", + "org.glassfish.jersey.internal", + "org.glassfish.jersey.internal.config", + "org.glassfish.jersey.internal.guava", + "org.glassfish.jersey.internal.inject", + "org.glassfish.jersey.internal.jsr166", + "org.glassfish.jersey.internal.l10n", + "org.glassfish.jersey.internal.routing", + "org.glassfish.jersey.internal.sonar", + "org.glassfish.jersey.internal.spi", + "org.glassfish.jersey.internal.util", + "org.glassfish.jersey.internal.util.collection", + "org.glassfish.jersey.logging", + "org.glassfish.jersey.message", + "org.glassfish.jersey.message.internal", + "org.glassfish.jersey.model", + "org.glassfish.jersey.model.internal", + "org.glassfish.jersey.model.internal.spi", + "org.glassfish.jersey.process", + 
"org.glassfish.jersey.process.internal", + "org.glassfish.jersey.spi", + "org.glassfish.jersey.uri", + "org.glassfish.jersey.uri.internal" + ], + "org.glassfish.jersey.inject:jersey-hk2": [ + "org.glassfish.jersey.inject.hk2" + ], + "org.hamcrest:hamcrest-core": [ + "org.hamcrest", + "org.hamcrest.core", + "org.hamcrest.internal" + ], + "org.javassist:javassist": [ + "javassist", + "javassist.bytecode", + "javassist.bytecode.analysis", + "javassist.bytecode.annotation", + "javassist.bytecode.stackmap", + "javassist.compiler", + "javassist.compiler.ast", + "javassist.convert", + "javassist.expr", + "javassist.runtime", + "javassist.scopedpool", + "javassist.tools", + "javassist.tools.reflect", + "javassist.tools.rmi", + "javassist.tools.web", + "javassist.util", + "javassist.util.proxy" + ], + "org.jboss.marshalling:jboss-marshalling": [ + "org.jboss.marshalling", + "org.jboss.marshalling._private", + "org.jboss.marshalling.cloner", + "org.jboss.marshalling.reflect", + "org.jboss.marshalling.util" + ], + "org.jboss.marshalling:jboss-marshalling-river": [ + "org.jboss.marshalling.river" + ], + "org.jetbrains:annotations": [ + "org.intellij.lang.annotations", + "org.jetbrains.annotations" + ], + "org.jodd:jodd-bean": [ + "jodd.bean", + "jodd.introspector", + "jodd.typeconverter", + "jodd.typeconverter.impl" + ], + "org.jodd:jodd-core": [ + "jodd", + "jodd.buffer", + "jodd.cache", + "jodd.chalk", + "jodd.cli", + "jodd.core", + "jodd.exception", + "jodd.inex", + "jodd.io", + "jodd.io.findfile", + "jodd.io.upload", + "jodd.io.upload.impl", + "jodd.io.watch", + "jodd.mutable", + "jodd.net", + "jodd.system", + "jodd.template", + "jodd.time", + "jodd.util", + "jodd.util.annotation", + "jodd.util.cl", + "jodd.util.collection", + "jodd.util.concurrent" + ], + "org.json:json": [ + "org.json" + ], + "org.luaj:luaj-jse": [ + "", + "org.luaj.vm2", + "org.luaj.vm2.ast", + "org.luaj.vm2.compiler", + "org.luaj.vm2.lib", + "org.luaj.vm2.lib.jse", + "org.luaj.vm2.luajc", + "org.luaj.vm2.parser", + "org.luaj.vm2.script", + "org.luaj.vm2.server" + ], + "org.mockito:mockito-core": [ + "org.mockito", + "org.mockito.codegen", + "org.mockito.configuration", + "org.mockito.creation.instance", + "org.mockito.exceptions.base", + "org.mockito.exceptions.misusing", + "org.mockito.exceptions.stacktrace", + "org.mockito.exceptions.verification", + "org.mockito.exceptions.verification.junit", + "org.mockito.exceptions.verification.opentest4j", + "org.mockito.hamcrest", + "org.mockito.internal", + "org.mockito.internal.configuration", + "org.mockito.internal.configuration.injection", + "org.mockito.internal.configuration.injection.filter", + "org.mockito.internal.configuration.injection.scanner", + "org.mockito.internal.configuration.plugins", + "org.mockito.internal.creation", + "org.mockito.internal.creation.bytebuddy", + "org.mockito.internal.creation.instance", + "org.mockito.internal.creation.proxy", + "org.mockito.internal.creation.settings", + "org.mockito.internal.creation.util", + "org.mockito.internal.debugging", + "org.mockito.internal.exceptions", + "org.mockito.internal.exceptions.stacktrace", + "org.mockito.internal.exceptions.util", + "org.mockito.internal.framework", + "org.mockito.internal.hamcrest", + "org.mockito.internal.handler", + "org.mockito.internal.invocation", + "org.mockito.internal.invocation.finder", + "org.mockito.internal.invocation.mockref", + "org.mockito.internal.junit", + "org.mockito.internal.listeners", + "org.mockito.internal.matchers", + "org.mockito.internal.matchers.apachecommons", 
+ "org.mockito.internal.matchers.text", + "org.mockito.internal.progress", + "org.mockito.internal.reporting", + "org.mockito.internal.runners", + "org.mockito.internal.runners.util", + "org.mockito.internal.session", + "org.mockito.internal.stubbing", + "org.mockito.internal.stubbing.answers", + "org.mockito.internal.stubbing.defaultanswers", + "org.mockito.internal.util", + "org.mockito.internal.util.collections", + "org.mockito.internal.util.concurrent", + "org.mockito.internal.util.io", + "org.mockito.internal.util.reflection", + "org.mockito.internal.verification", + "org.mockito.internal.verification.api", + "org.mockito.internal.verification.argumentmatching", + "org.mockito.internal.verification.checkers", + "org.mockito.invocation", + "org.mockito.junit", + "org.mockito.listeners", + "org.mockito.mock", + "org.mockito.plugins", + "org.mockito.quality", + "org.mockito.session", + "org.mockito.stubbing", + "org.mockito.verification" + ], + "org.objenesis:objenesis": [ + "org.objenesis", + "org.objenesis.instantiator", + "org.objenesis.instantiator.android", + "org.objenesis.instantiator.annotations", + "org.objenesis.instantiator.basic", + "org.objenesis.instantiator.gcj", + "org.objenesis.instantiator.perc", + "org.objenesis.instantiator.sun", + "org.objenesis.instantiator.util", + "org.objenesis.strategy" + ], + "org.openjdk.jmh:jmh-core": [ + "org.openjdk.jmh", + "org.openjdk.jmh.annotations", + "org.openjdk.jmh.generators.core", + "org.openjdk.jmh.infra", + "org.openjdk.jmh.profile", + "org.openjdk.jmh.results", + "org.openjdk.jmh.results.format", + "org.openjdk.jmh.runner", + "org.openjdk.jmh.runner.format", + "org.openjdk.jmh.runner.link", + "org.openjdk.jmh.runner.options", + "org.openjdk.jmh.util", + "org.openjdk.jmh.util.lines" + ], + "org.openjdk.jmh:jmh-generator-annprocess": [ + "org.openjdk.jmh.generators", + "org.openjdk.jmh.generators.annotations" + ], + "org.ow2.asm:asm": [ + "org.objectweb.asm", + "org.objectweb.asm.signature" + ], + "org.ow2.asm:asm-analysis": [ + "org.objectweb.asm.tree.analysis" + ], + "org.ow2.asm:asm-commons": [ + "org.objectweb.asm.commons" + ], + "org.ow2.asm:asm-tree": [ + "org.objectweb.asm.tree" + ], + "org.ow2.asm:asm-util": [ + "org.objectweb.asm.util" + ], + "org.pcollections:pcollections": [ + "org.pcollections" + ], + "org.projectlombok:lombok": [ + "lombok", + "lombok.delombok.ant", + "lombok.experimental", + "lombok.extern.apachecommons", + "lombok.extern.flogger", + "lombok.extern.jackson", + "lombok.extern.java", + "lombok.extern.jbosslog", + "lombok.extern.log4j", + "lombok.extern.slf4j", + "lombok.javac.apt", + "lombok.launch" + ], + "org.reactivestreams:reactive-streams": [ + "org.reactivestreams" + ], + "org.redisson:redisson": [ + "org.redisson", + "org.redisson.api", + "org.redisson.api.annotation", + "org.redisson.api.condition", + "org.redisson.api.executor", + "org.redisson.api.geo", + "org.redisson.api.listener", + "org.redisson.api.map", + "org.redisson.api.map.event", + "org.redisson.api.mapreduce", + "org.redisson.api.queue", + "org.redisson.api.redisnode", + "org.redisson.api.search", + "org.redisson.api.search.aggregate", + "org.redisson.api.search.index", + "org.redisson.api.search.query", + "org.redisson.api.stream", + "org.redisson.cache", + "org.redisson.client", + "org.redisson.client.codec", + "org.redisson.client.handler", + "org.redisson.client.protocol", + "org.redisson.client.protocol.convertor", + "org.redisson.client.protocol.decoder", + "org.redisson.client.protocol.pubsub", + "org.redisson.cluster", + 
"org.redisson.codec", + "org.redisson.command", + "org.redisson.config", + "org.redisson.connection", + "org.redisson.connection.balancer", + "org.redisson.connection.decoder", + "org.redisson.connection.pool", + "org.redisson.eviction", + "org.redisson.executor", + "org.redisson.executor.params", + "org.redisson.iterator", + "org.redisson.jcache", + "org.redisson.jcache.bean", + "org.redisson.jcache.configuration", + "org.redisson.liveobject", + "org.redisson.liveobject.condition", + "org.redisson.liveobject.core", + "org.redisson.liveobject.misc", + "org.redisson.liveobject.resolver", + "org.redisson.mapreduce", + "org.redisson.misc", + "org.redisson.pubsub", + "org.redisson.reactive", + "org.redisson.redisnode", + "org.redisson.remote", + "org.redisson.rx", + "org.redisson.spring.cache", + "org.redisson.spring.misc", + "org.redisson.spring.session", + "org.redisson.spring.session.config", + "org.redisson.spring.support", + "org.redisson.spring.transaction", + "org.redisson.transaction", + "org.redisson.transaction.operation", + "org.redisson.transaction.operation.bucket", + "org.redisson.transaction.operation.map", + "org.redisson.transaction.operation.set" + ], + "org.reflections:reflections": [ + "org.reflections", + "org.reflections.scanners", + "org.reflections.serializers", + "org.reflections.util", + "org.reflections.vfs" + ], + "org.slf4j:jcl-over-slf4j": [ + "org.apache.commons.logging", + "org.apache.commons.logging.impl" + ], + "org.slf4j:slf4j-api": [ + "org.slf4j", + "org.slf4j.event", + "org.slf4j.helpers", + "org.slf4j.spi" + ], + "org.slf4j:slf4j-simple": [ + "org.slf4j.simple" + ], + "org.threeten:threetenbp": [ + "org.threeten.bp", + "org.threeten.bp.chrono", + "org.threeten.bp.format", + "org.threeten.bp.jdk8", + "org.threeten.bp.temporal", + "org.threeten.bp.zone" + ], + "org.xerial:sqlite-jdbc": [ + "org.sqlite", + "org.sqlite.core", + "org.sqlite.date", + "org.sqlite.javax", + "org.sqlite.jdbc3", + "org.sqlite.jdbc4", + "org.sqlite.util" + ], + "org.yaml:snakeyaml": [ + "org.yaml.snakeyaml", + "org.yaml.snakeyaml.comments", + "org.yaml.snakeyaml.composer", + "org.yaml.snakeyaml.constructor", + "org.yaml.snakeyaml.emitter", + "org.yaml.snakeyaml.env", + "org.yaml.snakeyaml.error", + "org.yaml.snakeyaml.events", + "org.yaml.snakeyaml.extensions.compactnotation", + "org.yaml.snakeyaml.external.biz.base64Coder", + "org.yaml.snakeyaml.external.com.google.gdata.util.common.base", + "org.yaml.snakeyaml.inspector", + "org.yaml.snakeyaml.internal", + "org.yaml.snakeyaml.introspector", + "org.yaml.snakeyaml.nodes", + "org.yaml.snakeyaml.parser", + "org.yaml.snakeyaml.reader", + "org.yaml.snakeyaml.representer", + "org.yaml.snakeyaml.resolver", + "org.yaml.snakeyaml.scanner", + "org.yaml.snakeyaml.serializer", + "org.yaml.snakeyaml.tokens", + "org.yaml.snakeyaml.util" + ], + "redis.clients:jedis": [ + "redis.clients.jedis", + "redis.clients.jedis.args", + "redis.clients.jedis.bloom", + "redis.clients.jedis.bloom.commands", + "redis.clients.jedis.commands", + "redis.clients.jedis.exceptions", + "redis.clients.jedis.executors", + "redis.clients.jedis.gears", + "redis.clients.jedis.gears.resps", + "redis.clients.jedis.graph", + "redis.clients.jedis.graph.entities", + "redis.clients.jedis.json", + "redis.clients.jedis.json.commands", + "redis.clients.jedis.mcf", + "redis.clients.jedis.params", + "redis.clients.jedis.providers", + "redis.clients.jedis.resps", + "redis.clients.jedis.search", + "redis.clients.jedis.search.aggr", + "redis.clients.jedis.search.querybuilder", + 
"redis.clients.jedis.search.schemafields", + "redis.clients.jedis.timeseries", + "redis.clients.jedis.util" + ], + "software.amazon.ion:ion-java": [ + "software.amazon.ion", + "software.amazon.ion.apps", + "software.amazon.ion.facet", + "software.amazon.ion.impl", + "software.amazon.ion.impl.bin", + "software.amazon.ion.impl.lite", + "software.amazon.ion.system", + "software.amazon.ion.util" + ] + }, + "repositories": { + "https://repo.maven.apache.org/maven2/": [ + "aopalliance:aopalliance", + "com.amazonaws:aws-java-sdk-core", + "com.amazonaws:aws-java-sdk-kms", + "com.amazonaws:aws-java-sdk-s3", + "com.amazonaws:aws-java-sdk-secretsmanager", + "com.amazonaws:jmespath-java", + "com.esotericsoftware:kryo", + "com.esotericsoftware:minlog", + "com.esotericsoftware:reflectasm", + "com.fasterxml.jackson.core:jackson-annotations", + "com.fasterxml.jackson.core:jackson-core", + "com.fasterxml.jackson.core:jackson-databind", + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor", + "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml", + "com.fasterxml.jackson.jaxrs:jackson-jaxrs-base", + "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider", + "com.fasterxml.jackson.module:jackson-module-jaxb-annotations", + "com.github.ben-manes.caffeine:caffeine", + "com.github.docker-java:docker-java", + "com.github.docker-java:docker-java-api", + "com.github.docker-java:docker-java-core", + "com.github.docker-java:docker-java-transport", + "com.github.docker-java:docker-java-transport-jersey", + "com.github.docker-java:docker-java-transport-netty", + "com.github.fppt:jedis-mock", + "com.github.jnr:jffi", + "com.github.jnr:jffi:jar:native", + "com.github.jnr:jnr-a64asm", + "com.github.jnr:jnr-constants", + "com.github.jnr:jnr-ffi", + "com.github.jnr:jnr-posix", + "com.github.jnr:jnr-x86asm", + "com.github.kevinstern:software-and-algorithms", + "com.github.luben:zstd-jni", + "com.github.oshi:oshi-core", + "com.github.pcj:google-options", + "com.github.serceman:jnr-fuse", + "com.google.android:annotations", + "com.google.api.grpc:proto-google-common-protos", + "com.google.auth:google-auth-library-credentials", + "com.google.auth:google-auth-library-oauth2-http", + "com.google.auto.service:auto-service-annotations", + "com.google.auto.value:auto-value-annotations", + "com.google.auto:auto-common", + "com.google.code.findbugs:jsr305", + "com.google.code.gson:gson", + "com.google.errorprone:error_prone_annotation", + "com.google.errorprone:error_prone_annotations", + "com.google.errorprone:error_prone_check_api", + "com.google.errorprone:error_prone_core", + "com.google.errorprone:error_prone_type_annotations", + "com.google.guava:failureaccess", + "com.google.guava:guava", + "com.google.guava:guava-testlib", + "com.google.guava:listenablefuture", + "com.google.http-client:google-http-client", + "com.google.http-client:google-http-client-gson", + "com.google.inject:guice", + "com.google.j2objc:j2objc-annotations", + "com.google.jimfs:jimfs", + "com.google.protobuf:protobuf-java", + "com.google.protobuf:protobuf-java-util", + "com.google.truth:truth", + "com.googlecode.json-simple:json-simple", + "com.jayway.jsonpath:json-path", + "com.kohlschutter.junixsocket:junixsocket-common", + "com.kohlschutter.junixsocket:junixsocket-native-common", + "com.sun.activation:jakarta.activation", + "commons-codec:commons-codec", + "commons-io:commons-io", + "commons-logging:commons-logging", + "io.github.eisop:dataflow-errorprone", + "io.github.java-diff-utils:java-diff-utils", + "io.grpc:grpc-api", + 
"io.grpc:grpc-auth", + "io.grpc:grpc-context", + "io.grpc:grpc-core", + "io.grpc:grpc-inprocess", + "io.grpc:grpc-netty", + "io.grpc:grpc-netty-shaded", + "io.grpc:grpc-protobuf", + "io.grpc:grpc-protobuf-lite", + "io.grpc:grpc-services", + "io.grpc:grpc-stub", + "io.grpc:grpc-testing", + "io.grpc:grpc-util", + "io.netty:netty-buffer", + "io.netty:netty-codec", + "io.netty:netty-codec-dns", + "io.netty:netty-codec-http", + "io.netty:netty-codec-http2", + "io.netty:netty-codec-socks", + "io.netty:netty-common", + "io.netty:netty-handler", + "io.netty:netty-handler-proxy", + "io.netty:netty-resolver", + "io.netty:netty-resolver-dns", + "io.netty:netty-transport", + "io.netty:netty-transport-classes-epoll", + "io.netty:netty-transport-classes-kqueue", + "io.netty:netty-transport-native-epoll", + "io.netty:netty-transport-native-epoll:jar:linux-x86_64", + "io.netty:netty-transport-native-kqueue", + "io.netty:netty-transport-native-kqueue:jar:osx-x86_64", + "io.netty:netty-transport-native-unix-common", + "io.opencensus:opencensus-api", + "io.opencensus:opencensus-contrib-http-util", + "io.perfmark:perfmark-api", + "io.projectreactor:reactor-core", + "io.prometheus:simpleclient", + "io.prometheus:simpleclient_common", + "io.prometheus:simpleclient_hotspot", + "io.prometheus:simpleclient_httpserver", + "io.prometheus:simpleclient_tracer_common", + "io.prometheus:simpleclient_tracer_otel", + "io.prometheus:simpleclient_tracer_otel_agent", + "io.reactivex.rxjava3:rxjava", + "jakarta.activation:jakarta.activation-api", + "jakarta.annotation:jakarta.annotation-api", + "jakarta.ws.rs:jakarta.ws.rs-api", + "jakarta.xml.bind:jakarta.xml.bind-api", + "javax.annotation:javax.annotation-api", + "javax.cache:cache-api", + "javax.inject:javax.inject", + "joda-time:joda-time", + "junit:junit", + "me.dinowernli:java-grpc-prometheus", + "net.bytebuddy:byte-buddy", + "net.bytebuddy:byte-buddy-agent", + "net.java.dev.jna:jna", + "net.java.dev.jna:jna-platform", + "net.javacrumbs.future-converter:future-converter-common", + "net.javacrumbs.future-converter:future-converter-guava-common", + "net.javacrumbs.future-converter:future-converter-java8-common", + "net.javacrumbs.future-converter:future-converter-java8-guava", + "net.jcip:jcip-annotations", + "net.minidev:accessors-smart", + "net.minidev:json-smart", + "net.sf.jopt-simple:jopt-simple", + "org.apache.commons:commons-compress", + "org.apache.commons:commons-lang3", + "org.apache.commons:commons-math3", + "org.apache.commons:commons-pool2", + "org.apache.httpcomponents:httpclient", + "org.apache.httpcomponents:httpcore", + "org.apache.tomcat:annotations-api", + "org.bouncycastle:bcpkix-jdk18on", + "org.bouncycastle:bcprov-jdk15on", + "org.bouncycastle:bcprov-jdk18on", + "org.bouncycastle:bcutil-jdk18on", + "org.checkerframework:checker-qual", + "org.codehaus.mojo:animal-sniffer-annotations", + "org.glassfish.hk2.external:aopalliance-repackaged", + "org.glassfish.hk2.external:jakarta.inject", + "org.glassfish.hk2:hk2-api", + "org.glassfish.hk2:hk2-locator", + "org.glassfish.hk2:hk2-utils", + "org.glassfish.hk2:osgi-resource-locator", + "org.glassfish.jersey.connectors:jersey-apache-connector", + "org.glassfish.jersey.core:jersey-client", + "org.glassfish.jersey.core:jersey-common", + "org.glassfish.jersey.inject:jersey-hk2", + "org.hamcrest:hamcrest-core", + "org.javassist:javassist", + "org.jboss.marshalling:jboss-marshalling", + "org.jboss.marshalling:jboss-marshalling-river", + "org.jetbrains:annotations", + "org.jodd:jodd-bean", + "org.jodd:jodd-core", + 
"org.json:json", + "org.luaj:luaj-jse", + "org.mockito:mockito-core", + "org.objenesis:objenesis", + "org.openjdk.jmh:jmh-core", + "org.openjdk.jmh:jmh-generator-annprocess", + "org.ow2.asm:asm", + "org.ow2.asm:asm-analysis", + "org.ow2.asm:asm-commons", + "org.ow2.asm:asm-tree", + "org.ow2.asm:asm-util", + "org.pcollections:pcollections", + "org.projectlombok:lombok", + "org.reactivestreams:reactive-streams", + "org.redisson:redisson", + "org.reflections:reflections", + "org.slf4j:jcl-over-slf4j", + "org.slf4j:slf4j-api", + "org.slf4j:slf4j-simple", + "org.threeten:threetenbp", + "org.xerial:sqlite-jdbc", + "org.yaml:snakeyaml", + "redis.clients:jedis", + "software.amazon.ion:ion-java" + ], + "https://repo1.maven.org/maven2/": [ + "aopalliance:aopalliance", + "com.amazonaws:aws-java-sdk-core", + "com.amazonaws:aws-java-sdk-kms", + "com.amazonaws:aws-java-sdk-s3", + "com.amazonaws:aws-java-sdk-secretsmanager", + "com.amazonaws:jmespath-java", + "com.esotericsoftware:kryo", + "com.esotericsoftware:minlog", + "com.esotericsoftware:reflectasm", + "com.fasterxml.jackson.core:jackson-annotations", + "com.fasterxml.jackson.core:jackson-core", + "com.fasterxml.jackson.core:jackson-databind", + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor", + "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml", + "com.fasterxml.jackson.jaxrs:jackson-jaxrs-base", + "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider", + "com.fasterxml.jackson.module:jackson-module-jaxb-annotations", + "com.github.ben-manes.caffeine:caffeine", + "com.github.docker-java:docker-java", + "com.github.docker-java:docker-java-api", + "com.github.docker-java:docker-java-core", + "com.github.docker-java:docker-java-transport", + "com.github.docker-java:docker-java-transport-jersey", + "com.github.docker-java:docker-java-transport-netty", + "com.github.fppt:jedis-mock", + "com.github.jnr:jffi", + "com.github.jnr:jffi:jar:native", + "com.github.jnr:jnr-a64asm", + "com.github.jnr:jnr-constants", + "com.github.jnr:jnr-ffi", + "com.github.jnr:jnr-posix", + "com.github.jnr:jnr-x86asm", + "com.github.kevinstern:software-and-algorithms", + "com.github.luben:zstd-jni", + "com.github.oshi:oshi-core", + "com.github.pcj:google-options", + "com.github.serceman:jnr-fuse", + "com.google.android:annotations", + "com.google.api.grpc:proto-google-common-protos", + "com.google.auth:google-auth-library-credentials", + "com.google.auth:google-auth-library-oauth2-http", + "com.google.auto.service:auto-service-annotations", + "com.google.auto.value:auto-value-annotations", + "com.google.auto:auto-common", + "com.google.code.findbugs:jsr305", + "com.google.code.gson:gson", + "com.google.errorprone:error_prone_annotation", + "com.google.errorprone:error_prone_annotations", + "com.google.errorprone:error_prone_check_api", + "com.google.errorprone:error_prone_core", + "com.google.errorprone:error_prone_type_annotations", + "com.google.guava:failureaccess", + "com.google.guava:guava", + "com.google.guava:guava-testlib", + "com.google.guava:listenablefuture", + "com.google.http-client:google-http-client", + "com.google.http-client:google-http-client-gson", + "com.google.inject:guice", + "com.google.j2objc:j2objc-annotations", + "com.google.jimfs:jimfs", + "com.google.protobuf:protobuf-java", + "com.google.protobuf:protobuf-java-util", + "com.google.truth:truth", + "com.googlecode.json-simple:json-simple", + "com.jayway.jsonpath:json-path", + "com.kohlschutter.junixsocket:junixsocket-common", + 
"com.kohlschutter.junixsocket:junixsocket-native-common", + "com.sun.activation:jakarta.activation", + "commons-codec:commons-codec", + "commons-io:commons-io", + "commons-logging:commons-logging", + "io.github.eisop:dataflow-errorprone", + "io.github.java-diff-utils:java-diff-utils", + "io.grpc:grpc-api", + "io.grpc:grpc-auth", + "io.grpc:grpc-context", + "io.grpc:grpc-core", + "io.grpc:grpc-inprocess", + "io.grpc:grpc-netty", + "io.grpc:grpc-netty-shaded", + "io.grpc:grpc-protobuf", + "io.grpc:grpc-protobuf-lite", + "io.grpc:grpc-services", + "io.grpc:grpc-stub", + "io.grpc:grpc-testing", + "io.grpc:grpc-util", + "io.netty:netty-buffer", + "io.netty:netty-codec", + "io.netty:netty-codec-dns", + "io.netty:netty-codec-http", + "io.netty:netty-codec-http2", + "io.netty:netty-codec-socks", + "io.netty:netty-common", + "io.netty:netty-handler", + "io.netty:netty-handler-proxy", + "io.netty:netty-resolver", + "io.netty:netty-resolver-dns", + "io.netty:netty-transport", + "io.netty:netty-transport-classes-epoll", + "io.netty:netty-transport-classes-kqueue", + "io.netty:netty-transport-native-epoll", + "io.netty:netty-transport-native-epoll:jar:linux-x86_64", + "io.netty:netty-transport-native-kqueue", + "io.netty:netty-transport-native-kqueue:jar:osx-x86_64", + "io.netty:netty-transport-native-unix-common", + "io.opencensus:opencensus-api", + "io.opencensus:opencensus-contrib-http-util", + "io.perfmark:perfmark-api", + "io.projectreactor:reactor-core", + "io.prometheus:simpleclient", + "io.prometheus:simpleclient_common", + "io.prometheus:simpleclient_hotspot", + "io.prometheus:simpleclient_httpserver", + "io.prometheus:simpleclient_tracer_common", + "io.prometheus:simpleclient_tracer_otel", + "io.prometheus:simpleclient_tracer_otel_agent", + "io.reactivex.rxjava3:rxjava", + "jakarta.activation:jakarta.activation-api", + "jakarta.annotation:jakarta.annotation-api", + "jakarta.ws.rs:jakarta.ws.rs-api", + "jakarta.xml.bind:jakarta.xml.bind-api", + "javax.annotation:javax.annotation-api", + "javax.cache:cache-api", + "javax.inject:javax.inject", + "joda-time:joda-time", + "junit:junit", + "me.dinowernli:java-grpc-prometheus", + "net.bytebuddy:byte-buddy", + "net.bytebuddy:byte-buddy-agent", + "net.java.dev.jna:jna", + "net.java.dev.jna:jna-platform", + "net.javacrumbs.future-converter:future-converter-common", + "net.javacrumbs.future-converter:future-converter-guava-common", + "net.javacrumbs.future-converter:future-converter-java8-common", + "net.javacrumbs.future-converter:future-converter-java8-guava", + "net.jcip:jcip-annotations", + "net.minidev:accessors-smart", + "net.minidev:json-smart", + "net.sf.jopt-simple:jopt-simple", + "org.apache.commons:commons-compress", + "org.apache.commons:commons-lang3", + "org.apache.commons:commons-math3", + "org.apache.commons:commons-pool2", + "org.apache.httpcomponents:httpclient", + "org.apache.httpcomponents:httpcore", + "org.apache.tomcat:annotations-api", + "org.bouncycastle:bcpkix-jdk18on", + "org.bouncycastle:bcprov-jdk15on", + "org.bouncycastle:bcprov-jdk18on", + "org.bouncycastle:bcutil-jdk18on", + "org.checkerframework:checker-qual", + "org.codehaus.mojo:animal-sniffer-annotations", + "org.glassfish.hk2.external:aopalliance-repackaged", + "org.glassfish.hk2.external:jakarta.inject", + "org.glassfish.hk2:hk2-api", + "org.glassfish.hk2:hk2-locator", + "org.glassfish.hk2:hk2-utils", + "org.glassfish.hk2:osgi-resource-locator", + "org.glassfish.jersey.connectors:jersey-apache-connector", + "org.glassfish.jersey.core:jersey-client", + 
"org.glassfish.jersey.core:jersey-common", + "org.glassfish.jersey.inject:jersey-hk2", + "org.hamcrest:hamcrest-core", + "org.javassist:javassist", + "org.jboss.marshalling:jboss-marshalling", + "org.jboss.marshalling:jboss-marshalling-river", + "org.jetbrains:annotations", + "org.jodd:jodd-bean", + "org.jodd:jodd-core", + "org.json:json", + "org.luaj:luaj-jse", + "org.mockito:mockito-core", + "org.objenesis:objenesis", + "org.openjdk.jmh:jmh-core", + "org.openjdk.jmh:jmh-generator-annprocess", + "org.ow2.asm:asm", + "org.ow2.asm:asm-analysis", + "org.ow2.asm:asm-commons", + "org.ow2.asm:asm-tree", + "org.ow2.asm:asm-util", + "org.pcollections:pcollections", + "org.projectlombok:lombok", + "org.reactivestreams:reactive-streams", + "org.redisson:redisson", + "org.reflections:reflections", + "org.slf4j:jcl-over-slf4j", + "org.slf4j:slf4j-api", + "org.slf4j:slf4j-simple", + "org.threeten:threetenbp", + "org.xerial:sqlite-jdbc", + "org.yaml:snakeyaml", + "redis.clients:jedis", + "software.amazon.ion:ion-java" + ] + }, + "version": "2" +} diff --git a/persistentworkers/src/main/java/persistent/common/processes/JavaProcessWrapper.java b/persistentworkers/src/main/java/persistent/common/processes/JavaProcessWrapper.java index a27b6a9e99..89f2e6a5be 100644 --- a/persistentworkers/src/main/java/persistent/common/processes/JavaProcessWrapper.java +++ b/persistentworkers/src/main/java/persistent/common/processes/JavaProcessWrapper.java @@ -10,12 +10,16 @@ public class JavaProcessWrapper extends ProcessWrapper { + // Get the path of the JVM from the current process to avoid breaking the Bazel sandbox + public static final String CURRENT_JVM_COMMAND = + ProcessHandle.current().info().command().orElseThrow(() -> new RuntimeException("Unable to retrieve the path of the running JVM")); + public JavaProcessWrapper( Path workDir, String classPath, String fullClassName, String[] args ) throws IOException { super(workDir, cmdArgs( new String[]{ - "java", + CURRENT_JVM_COMMAND, "-cp", classPath, fullClassName diff --git a/persistentworkers/src/main/java/persistent/common/util/BUILD b/persistentworkers/src/main/java/persistent/common/util/BUILD index 32312aff44..b391f50527 100644 --- a/persistentworkers/src/main/java/persistent/common/util/BUILD +++ b/persistentworkers/src/main/java/persistent/common/util/BUILD @@ -2,5 +2,4 @@ java_library( name = "util", srcs = glob(["**/*.java"]), visibility = ["//visibility:public"], - deps = [], ) diff --git a/persistentworkers/src/main/protobuf/BUILD b/persistentworkers/src/main/protobuf/BUILD index bec20e6ff7..9c92e04c4c 100755 --- a/persistentworkers/src/main/protobuf/BUILD +++ b/persistentworkers/src/main/protobuf/BUILD @@ -1,5 +1,6 @@ -load("@rules_proto//proto:defs.bzl", "proto_library") load("@bazel_tools//tools/build_rules:utilities.bzl", "java_library_srcs") +load("@rules_java//java:defs.bzl", "java_proto_library") +load("@rules_proto//proto:defs.bzl", "proto_library") package(default_visibility = ["//visibility:public"]) diff --git a/persistentworkers/src/test/java/persistent/bazel/BUILD b/persistentworkers/src/test/java/persistent/bazel/BUILD index 0cf829a111..b85db78006 100644 --- a/persistentworkers/src/test/java/persistent/bazel/BUILD +++ b/persistentworkers/src/test/java/persistent/bazel/BUILD @@ -3,6 +3,7 @@ COMMON_DEPS = [ "//persistentworkers/src/main/java/persistent/bazel:bazel-persistent-workers", "//persistentworkers/src/test/java/persistent/testutil:testutil", "//persistentworkers/src/main/protobuf:worker_protocol_java_proto", + 
"@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", "@maven//:com_google_truth_truth", "@maven//:commons_io_commons_io", diff --git a/persistentworkers/src/test/java/persistent/bazel/processes/PersistentWorkerTest.java b/persistentworkers/src/test/java/persistent/bazel/processes/PersistentWorkerTest.java index 0cdc68a7ff..9712394203 100644 --- a/persistentworkers/src/test/java/persistent/bazel/processes/PersistentWorkerTest.java +++ b/persistentworkers/src/test/java/persistent/bazel/processes/PersistentWorkerTest.java @@ -16,6 +16,7 @@ import persistent.bazel.client.PersistentWorker; import persistent.bazel.client.WorkerKey; +import persistent.common.processes.JavaProcessWrapper; import persistent.testutil.ProcessUtils; import persistent.testutil.WorkerUtils; @@ -55,7 +56,7 @@ public void endToEndAdder() throws Exception { ); ImmutableList initCmd = ImmutableList.of( - "java", + JavaProcessWrapper.CURRENT_JVM_COMMAND, "-cp", jarPath.toString(), "adder.Adder", diff --git a/run_server b/run_server index 7698063fef..759b31743d 100755 --- a/run_server +++ b/run_server @@ -3,7 +3,7 @@ set -e set -o pipefail # Run Redis docker container -docker start buildfarm-redis || docker run -d --rm --name buildfarm-redis -p 6379:6379 redis:5.0.9 +docker start buildfarm-redis || docker run -d --rm --name buildfarm-redis -p 6379:6379 redis:7.2.4 redis-cli config set stop-writes-on-bgsave-error no # Determine which configuration file to use - default or user provided @@ -15,4 +15,4 @@ else fi # Run Server -bazelisk run //src/main/java/build/buildfarm:buildfarm-server -- --jvm_flag=-Dlogging.config=file:$PWD/examples/logging.properties $config \ No newline at end of file +bazelisk run //src/main/java/build/buildfarm:buildfarm-server -- --jvm_flag=-Dlogging.config=file:$PWD/examples/logging.properties $config diff --git a/run_worker b/run_worker index f3b84fdccf..f1da296906 100755 --- a/run_worker +++ b/run_worker @@ -3,7 +3,7 @@ set -e set -o pipefail # Run Redis docker container -docker start buildfarm-redis || docker run -d --rm --name buildfarm-redis -p 6379:6379 redis:5.0.9 +docker start buildfarm-redis || docker run -d --rm --name buildfarm-redis -p 6379:6379 redis:7.2.4 redis-cli config set stop-writes-on-bgsave-error no # Determine which configuration file to use - default or user provided @@ -15,4 +15,4 @@ else fi # Run Server -bazelisk run //src/main/java/build/buildfarm:buildfarm-shard-worker -- --jvm_flag=-Dlogging.config=file:$PWD/examples/logging.properties $config \ No newline at end of file +bazelisk run //src/main/java/build/buildfarm:buildfarm-shard-worker -- --jvm_flag=-Dlogging.config=file:$PWD/examples/logging.properties $config diff --git a/src/main/java/build/buildfarm/BUILD b/src/main/java/build/buildfarm/BUILD index 601aa38eb4..3cbdeb5231 100644 --- a/src/main/java/build/buildfarm/BUILD +++ b/src/main/java/build/buildfarm/BUILD @@ -1,4 +1,4 @@ -load("//:jvm_flags.bzl", "ensure_accurate_metadata") +load("//:jvm_flags.bzl", "add_opens_sun_nio_fs", "ensure_accurate_metadata") package( default_visibility = ["//src:__subpackages__"], @@ -15,7 +15,7 @@ java_binary( classpath_resources = [ ":configs", ], - jvm_flags = ensure_accurate_metadata(), + jvm_flags = ensure_accurate_metadata() + add_opens_sun_nio_fs(), main_class = "build.buildfarm.server.BuildFarmServer", visibility = ["//visibility:public"], runtime_deps = [ @@ -29,7 +29,7 @@ java_binary( classpath_resources = [ ":configs", ], - jvm_flags = ensure_accurate_metadata(), + jvm_flags = 
ensure_accurate_metadata() + add_opens_sun_nio_fs(), main_class = "build.buildfarm.worker.shard.Worker", visibility = ["//visibility:public"], runtime_deps = [ diff --git a/src/main/java/build/buildfarm/actioncache/BUILD b/src/main/java/build/buildfarm/actioncache/BUILD index 46e8614c90..efe57cdbf2 100644 --- a/src/main/java/build/buildfarm/actioncache/BUILD +++ b/src/main/java/build/buildfarm/actioncache/BUILD @@ -6,13 +6,13 @@ java_library( "//src/main/java/build/buildfarm/backplane", "//src/main/java/build/buildfarm/common", "//src/main/java/build/buildfarm/common/config", + "//third_party/remote-apis:build_bazel_remote_execution_v2_remote_execution_java_grpc", "@maven//:com_github_ben_manes_caffeine_caffeine", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_core", "@maven//:net_javacrumbs_future_converter_future_converter_java8_guava", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_grpc", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) diff --git a/src/main/java/build/buildfarm/admin/Admin.java b/src/main/java/build/buildfarm/admin/Admin.java deleted file mode 100644 index ca739f2347..0000000000 --- a/src/main/java/build/buildfarm/admin/Admin.java +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2020 The Bazel Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
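
The add_opens_sun_nio_fs() helper appended to both binaries' jvm_flags above is loaded from //:jvm_flags.bzl, whose body is not part of this diff; presumably it emits something like --add-opens=java.base/sun.nio.fs=ALL-UNNAMED, which JDK 17 requires before reflective access into sun.nio.fs is permitted. A hypothetical probe showing the failure mode the flag avoids:

import java.lang.reflect.Field;
import java.nio.file.Paths;

public class AddOpensProbe {
  public static void main(String[] args) throws Exception {
    // The concrete Path class is JDK-internal, e.g. sun.nio.fs.UnixPath.
    Class<?> pathClass = Paths.get(".").getClass();
    Field field = pathClass.getDeclaredFields()[0];
    // On JDK 17 this throws InaccessibleObjectException unless the JVM was
    // launched with --add-opens=java.base/sun.nio.fs=ALL-UNNAMED.
    field.setAccessible(true);
    System.out.println("sun.nio.fs is open to the unnamed module");
  }
}
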
- -package build.buildfarm.admin; - -import build.buildfarm.v1test.GetHostsResult; - -public interface Admin { - void terminateHost(String hostId); - - void stopContainer(String hostId, String containerName); - - GetHostsResult getHosts(String filter, int ageInMinutes, String status); - - void scaleCluster( - String scaleGroupName, - Integer minHosts, - Integer maxHosts, - Integer targetHosts, - Integer targetReservedHostsPercent); - - void disableHostScaleInProtection(String instanceName); - - void disableHostScaleInProtection(String clusterEndpoint, String instanceIp); -} diff --git a/src/main/java/build/buildfarm/admin/BUILD b/src/main/java/build/buildfarm/admin/BUILD deleted file mode 100644 index a5e481399e..0000000000 --- a/src/main/java/build/buildfarm/admin/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -java_library( - name = "admin", - srcs = glob(["*.java"]), - visibility = ["//visibility:public"], - deps = [ - "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", - "@googleapis//:google_rpc_code_java_proto", - "@googleapis//:google_rpc_error_details_java_proto", - "@googleapis//:google_rpc_status_java_proto", - "@maven//:com_google_guava_guava", - "@maven//:com_google_protobuf_protobuf_java", - "@maven//:com_google_protobuf_protobuf_java_util", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", - ], -) diff --git a/src/main/java/build/buildfarm/admin/aws/AwsAdmin.java b/src/main/java/build/buildfarm/admin/aws/AwsAdmin.java deleted file mode 100644 index 5b971405cd..0000000000 --- a/src/main/java/build/buildfarm/admin/aws/AwsAdmin.java +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright 2020 The Bazel Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package build.buildfarm.admin.aws; - -import build.buildfarm.admin.Admin; -import build.buildfarm.common.config.BuildfarmConfigs; -import build.buildfarm.v1test.AdminGrpc; -import build.buildfarm.v1test.DisableScaleInProtectionRequest; -import build.buildfarm.v1test.GetHostsResult; -import build.buildfarm.v1test.Host; -import com.amazonaws.services.autoscaling.AmazonAutoScaling; -import com.amazonaws.services.autoscaling.AmazonAutoScalingClientBuilder; -import com.amazonaws.services.autoscaling.model.InstancesDistribution; -import com.amazonaws.services.autoscaling.model.MixedInstancesPolicy; -import com.amazonaws.services.autoscaling.model.SetInstanceProtectionRequest; -import com.amazonaws.services.autoscaling.model.SetInstanceProtectionResult; -import com.amazonaws.services.autoscaling.model.UpdateAutoScalingGroupRequest; -import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.ec2.AmazonEC2ClientBuilder; -import com.amazonaws.services.ec2.model.DescribeInstancesRequest; -import com.amazonaws.services.ec2.model.DescribeInstancesResult; -import com.amazonaws.services.ec2.model.Filter; -import com.amazonaws.services.ec2.model.Instance; -import com.amazonaws.services.ec2.model.Reservation; -import com.amazonaws.services.ec2.model.Tag; -import com.amazonaws.services.ec2.model.TerminateInstancesRequest; -import com.amazonaws.services.simplesystemsmanagement.AWSSimpleSystemsManagement; -import com.amazonaws.services.simplesystemsmanagement.AWSSimpleSystemsManagementClientBuilder; -import com.amazonaws.services.simplesystemsmanagement.model.SendCommandRequest; -import com.google.protobuf.util.Timestamps; -import io.grpc.ManagedChannel; -import io.grpc.netty.NegotiationType; -import io.grpc.netty.NettyChannelBuilder; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.Collections; -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.TimeZone; -import java.util.logging.Level; -import lombok.extern.java.Log; -import org.springframework.stereotype.Component; - -@Log -@Component -public class AwsAdmin implements Admin { - private static BuildfarmConfigs configs = BuildfarmConfigs.getInstance(); - private AmazonAutoScaling scale; - private AmazonEC2 ec2; - private AWSSimpleSystemsManagement ssm; - - public AwsAdmin() { - String region = configs.getServer().getCloudRegion(); - if (region != null) { - scale = AmazonAutoScalingClientBuilder.standard().withRegion(region).build(); - ec2 = AmazonEC2ClientBuilder.standard().withRegion(region).build(); - ssm = AWSSimpleSystemsManagementClientBuilder.standard().withRegion(region).build(); - } else { - log.warning("Missing cloudRegion configuration. 
AWS Admin will not be enabled."); - } - } - - @Override - public void terminateHost(String hostId) { - ec2.terminateInstances(new TerminateInstancesRequest().withInstanceIds(hostId)); - log.log(Level.INFO, String.format("Terminated host: %s", hostId)); - } - - @Override - public void stopContainer(String hostId, String containerName) { - String stopContainerCmd = - "docker ps | grep " + containerName + " | awk '{print $1 }' | xargs -I {} docker stop {}"; - Map> parameters = new HashMap<>(); - parameters.put("commands", Collections.singletonList(stopContainerCmd)); - ssm.sendCommand( - new SendCommandRequest() - .withDocumentName("AWS-RunShellScript") - .withInstanceIds(hostId) - .withParameters(parameters)); - log.log(Level.INFO, String.format("Stopped container: %s on host: %s", containerName, hostId)); - } - - @Override - public GetHostsResult getHosts(String filter, int ageInMinutes, String status) { - GetHostsResult.Builder resultBuilder = GetHostsResult.newBuilder(); - List hosts = new ArrayList<>(); - DescribeInstancesResult instancesResult = - ec2.describeInstances( - new DescribeInstancesRequest() - .withFilters(new Filter().withName("tag-value").withValues(filter))); - long hostNum = 1L; - for (Reservation r : instancesResult.getReservations()) { - for (Instance e : r.getInstances()) { - long uptime = getHostUptimeInMinutes(e.getLaunchTime()); - if (e.getPrivateIpAddress() != null - && uptime > ageInMinutes - && status.equalsIgnoreCase(e.getState().getName())) { - Host.Builder hostBuilder = Host.newBuilder(); - hostBuilder.setHostNum(hostNum++); - hostBuilder.setDnsName(e.getPrivateDnsName()); - hostBuilder.setHostId(e.getInstanceId()); - hostBuilder.setIpAddress(e.getPrivateIpAddress()); - hostBuilder.setLaunchTime(Timestamps.fromMillis(e.getLaunchTime().getTime())); - hostBuilder.setLifecycle( - e.getInstanceLifecycle() != null ? e.getInstanceLifecycle() : "on demand"); - hostBuilder.setNumCores(e.getCpuOptions().getCoreCount()); - hostBuilder.setState(e.getState().getName()); - hostBuilder.setType(e.getInstanceType()); - hostBuilder.setUptimeMinutes(uptime); - hosts.add(hostBuilder.build()); - } - } - } - resultBuilder.addAllHosts(hosts); - resultBuilder.setNumHosts(hosts.size()); - log.log(Level.FINE, String.format("Got %d hosts for filter: %s", hosts.size(), filter)); - return resultBuilder.build(); - } - - @Override - public void scaleCluster( - String scaleGroupName, - Integer minHosts, - Integer maxHosts, - Integer targetHosts, - Integer targetReservedHostsPercent) { - UpdateAutoScalingGroupRequest request = - new UpdateAutoScalingGroupRequest().withAutoScalingGroupName(scaleGroupName); - if (minHosts != null) { - request.setMinSize(minHosts); - } - if (maxHosts != null) { - request.setMaxSize(maxHosts); - } - if (targetHosts != null) { - request.setMaxSize(targetHosts); - } - if (targetReservedHostsPercent != null) { - request.setMixedInstancesPolicy( - new MixedInstancesPolicy() - .withInstancesDistribution( - new InstancesDistribution() - .withOnDemandPercentageAboveBaseCapacity(targetReservedHostsPercent))); - } - scale.updateAutoScalingGroup(request); - log.log(Level.INFO, String.format("Scaled: %s", scaleGroupName)); - } - - private long getHostUptimeInMinutes(Date launchTime) { - Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("UTC")); - return (cal.getTime().getTime() - launchTime.getTime()) / 60000; - } - - /** - * Disable instance scale in protection so that auto scaler can shutdown the instance. 
- * - * @param privateDnsName the private Dns name of instance (i.e. ip-xx-xxx-xx-xx.ec2.internal) - */ - @Override - public void disableHostScaleInProtection(String privateDnsName) { - // 1 get AutoScalingGroup and InstanceId - Instance workerInstance = getInstanceId(privateDnsName); - if (workerInstance == null) { - String errorMessage = "Cannot find instance with private DNS name " + privateDnsName; - log.log(Level.SEVERE, errorMessage); - throw new RuntimeException(errorMessage); - } - String instanceId = workerInstance.getInstanceId(); - String autoScalingGroup = getTagValue(workerInstance.getTags()); - if (autoScalingGroup == null || autoScalingGroup.length() == 0) { - String errorMessage = - "Cannot find AutoScalingGroup name of worker with private DNS name " + privateDnsName; - log.log(Level.SEVERE, errorMessage); - throw new RuntimeException(errorMessage); - } - - // 2 disable scale in protection of the worker - SetInstanceProtectionRequest disableProtectionRequest = - new SetInstanceProtectionRequest() - .withInstanceIds(instanceId) - .withAutoScalingGroupName(autoScalingGroup) - .withProtectedFromScaleIn(false); - SetInstanceProtectionResult result = scale.setInstanceProtection(disableProtectionRequest); - log.log( - Level.INFO, - String.format( - "Disable protection of host: %s in AutoScalingGroup: %s and get result: %s", - instanceId, autoScalingGroup, result.toString())); - } - - @Override - public void disableHostScaleInProtection(String clusterEndpoint, String instanceIp) { - ManagedChannel channel = null; - try { - NettyChannelBuilder builder = - NettyChannelBuilder.forTarget(clusterEndpoint).negotiationType(NegotiationType.PLAINTEXT); - channel = builder.build(); - AdminGrpc.AdminBlockingStub adminBlockingStub = AdminGrpc.newBlockingStub(channel); - adminBlockingStub.disableScaleInProtection( - DisableScaleInProtectionRequest.newBuilder().setInstanceName(instanceIp).build()); - } finally { - if (channel != null) { - channel.shutdown(); - } - } - } - - private String getTagValue(List tags) { - for (Tag tag : tags) { - if ("aws:autoscaling:groupName".equalsIgnoreCase(tag.getKey())) { - return tag.getValue(); - } - } - return null; - } - - private Instance getInstanceId(String privateDnsName) { - DescribeInstancesRequest describeInstancesRequest = - new DescribeInstancesRequest() - .withFilters(new Filter().withName("private-dns-name").withValues(privateDnsName)); - DescribeInstancesResult instancesResult = ec2.describeInstances(describeInstancesRequest); - for (Reservation r : instancesResult.getReservations()) { - for (Instance e : r.getInstances()) { - if (e.getPrivateDnsName() != null && e.getPrivateDnsName().equals(privateDnsName)) { - return e; - } - } - } - return null; - } -} diff --git a/src/main/java/build/buildfarm/admin/aws/BUILD b/src/main/java/build/buildfarm/admin/aws/BUILD deleted file mode 100644 index ea544e0b68..0000000000 --- a/src/main/java/build/buildfarm/admin/aws/BUILD +++ /dev/null @@ -1,29 +0,0 @@ -java_library( - name = "aws", - srcs = glob(["*.java"]), - plugins = ["//src/main/java/build/buildfarm/common:lombok"], - visibility = ["//visibility:public"], - deps = [ - "//src/main/java/build/buildfarm/admin", - "//src/main/java/build/buildfarm/common/config", - "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_grpc", - "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", - "@googleapis//:google_rpc_code_java_proto", - "@googleapis//:google_rpc_error_details_java_proto", - "@googleapis//:google_rpc_status_java_proto", - 
"@maven//:com_amazonaws_aws_java_sdk_autoscaling", - "@maven//:com_amazonaws_aws_java_sdk_core", - "@maven//:com_amazonaws_aws_java_sdk_ec2", - "@maven//:com_amazonaws_aws_java_sdk_secretsmanager", - "@maven//:com_amazonaws_aws_java_sdk_ssm", - "@maven//:com_google_guava_guava", - "@maven//:com_google_protobuf_protobuf_java_util", - "@maven//:io_grpc_grpc_api", - "@maven//:io_grpc_grpc_netty", - "@maven//:org_projectlombok_lombok", - "@maven//:org_springframework_spring_beans", - "@maven//:org_springframework_spring_context", - "@maven//:org_springframework_spring_core", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", - ], -) diff --git a/src/main/java/build/buildfarm/admin/gcp/BUILD b/src/main/java/build/buildfarm/admin/gcp/BUILD deleted file mode 100644 index 3d94b91f3f..0000000000 --- a/src/main/java/build/buildfarm/admin/gcp/BUILD +++ /dev/null @@ -1,18 +0,0 @@ -java_library( - name = "gcp", - srcs = glob(["*.java"]), - visibility = ["//visibility:public"], - deps = [ - "//src/main/java/build/buildfarm/admin", - "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", - "@googleapis//:google_rpc_code_java_proto", - "@googleapis//:google_rpc_error_details_java_proto", - "@googleapis//:google_rpc_status_java_proto", - "@maven//:com_google_guava_guava", - "@maven//:com_google_protobuf_protobuf_java_util", - "@maven//:org_springframework_spring_beans", - "@maven//:org_springframework_spring_context", - "@maven//:org_springframework_spring_core", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", - ], -) diff --git a/src/main/java/build/buildfarm/admin/gcp/GcpAdmin.java b/src/main/java/build/buildfarm/admin/gcp/GcpAdmin.java deleted file mode 100644 index 34ee9a0163..0000000000 --- a/src/main/java/build/buildfarm/admin/gcp/GcpAdmin.java +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2020 The Bazel Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package build.buildfarm.admin.gcp; - -import build.buildfarm.admin.Admin; -import build.buildfarm.v1test.GetHostsResult; -import org.springframework.stereotype.Component; - -@Component -public class GcpAdmin implements Admin { - @Override - public void terminateHost(String hostId) { - throw new UnsupportedOperationException("Not Implemented."); - } - - @Override - public void stopContainer(String hostId, String containerName) { - throw new UnsupportedOperationException("Not Implemented."); - } - - @Override - public GetHostsResult getHosts(String filter, int ageInMinutes, String status) { - throw new UnsupportedOperationException("Not Implemented."); - } - - @Override - public void scaleCluster( - String scaleGroupName, - Integer minHosts, - Integer maxHosts, - Integer targetHosts, - Integer targetReservedHostsPercent) { - throw new UnsupportedOperationException("Not Implemented."); - } - - @Override - public void disableHostScaleInProtection(String instanceName) { - throw new UnsupportedOperationException("Not Implemented."); - } - - @Override - public void disableHostScaleInProtection(String clusterEndpoint, String instanceIp) { - throw new UnsupportedOperationException("Not Implemented"); - } -} diff --git a/src/main/java/build/buildfarm/backplane/BUILD b/src/main/java/build/buildfarm/backplane/BUILD index 090d2cd2ef..e694fedc55 100644 --- a/src/main/java/build/buildfarm/backplane/BUILD +++ b/src/main/java/build/buildfarm/backplane/BUILD @@ -7,9 +7,9 @@ java_library( "//src/main/java/build/buildfarm/instance", "//src/main/java/build/buildfarm/operations", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", - "@googleapis//:google_longrunning_operations_java_proto", + "@com_google_googleapis//google/longrunning:longrunning_java_proto", "@maven//:com_google_guava_guava", "@maven//:net_jcip_jcip_annotations", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) diff --git a/src/main/java/build/buildfarm/backplane/Backplane.java b/src/main/java/build/buildfarm/backplane/Backplane.java index b11176774f..b1b05d5c42 100644 --- a/src/main/java/build/buildfarm/backplane/Backplane.java +++ b/src/main/java/build/buildfarm/backplane/Backplane.java @@ -98,6 +98,12 @@ FindOperationsResults findEnrichedOperations(Instance instance, String filterPre Iterable> getOperations(Set operationIds) throws IOException; + /** Returns a map of the worker name and its start time for given workers. */ + Map getWorkersStartTimeInEpochSecs(Set workerNames) throws IOException; + + /** Returns the insert time epoch in seconds for the digest. */ + long getDigestInsertTime(Digest blobDigest) throws IOException; + /** Returns a set of the names of all active storage workers. 
*/ Set getStorageWorkers() throws IOException; @@ -276,4 +282,7 @@ boolean pollOperation(QueueEntry queueEntry, ExecutionStage.Value stage, long re Boolean propertiesEligibleForQueue(List provisions); GetClientStartTimeResult getClientStartTime(GetClientStartTimeRequest request) throws IOException; + + /** Set expiry time for digests */ + void updateDigestsExpiry(Iterable digests) throws IOException; } diff --git a/src/main/java/build/buildfarm/cas/BUILD b/src/main/java/build/buildfarm/cas/BUILD index 301e922e13..f2ab5c5a83 100644 --- a/src/main/java/build/buildfarm/cas/BUILD +++ b/src/main/java/build/buildfarm/cas/BUILD @@ -8,15 +8,17 @@ java_library( ], deps = [ "//src/main/java/build/buildfarm/common", + "//src/main/java/build/buildfarm/common:BuildfarmExecutors", "//src/main/java/build/buildfarm/common/config", "//src/main/java/build/buildfarm/common/grpc", "//src/main/java/build/buildfarm/common/resources", "//src/main/java/build/buildfarm/common/resources:resource_java_proto", "//src/main/java/build/buildfarm/instance/stub", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", - "@googleapis//:google_bytestream_bytestream_java_grpc", - "@googleapis//:google_bytestream_bytestream_java_proto", - "@googleapis//:google_rpc_code_java_proto", + "//third_party/remote-apis:build_bazel_remote_execution_v2_remote_execution_java_grpc", + "@com_google_googleapis//google/bytestream:bytestream_java_grpc", + "@com_google_googleapis//google/bytestream:bytestream_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_github_jnr_jnr_ffi", "@maven//:com_google_code_findbugs_jsr305", "@maven//:com_google_guava_guava", @@ -25,13 +27,12 @@ java_library( "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_context", "@maven//:io_grpc_grpc_core", - "@maven//:io_grpc_grpc_netty", "@maven//:io_grpc_grpc_protobuf", "@maven//:io_grpc_grpc_stub", + "@maven//:io_netty_netty_codec_http", "@maven//:io_prometheus_simpleclient", "@maven//:net_jcip_jcip_annotations", "@maven//:org_projectlombok_lombok", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_grpc", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) diff --git a/src/main/java/build/buildfarm/cas/ContentAddressableStorages.java b/src/main/java/build/buildfarm/cas/ContentAddressableStorages.java index ade381ff50..5956296ba2 100644 --- a/src/main/java/build/buildfarm/cas/ContentAddressableStorages.java +++ b/src/main/java/build/buildfarm/cas/ContentAddressableStorages.java @@ -14,6 +14,7 @@ package build.buildfarm.cas; +import static build.buildfarm.common.grpc.Channels.createChannel; import static build.buildfarm.common.grpc.Retrier.NO_RETRIES; import static com.google.common.collect.Multimaps.synchronizedListMultimap; import static com.google.common.util.concurrent.MoreExecutors.directExecutor; @@ -29,8 +30,6 @@ import com.google.common.collect.ListMultimap; import com.google.common.collect.MultimapBuilder; import io.grpc.Channel; -import io.grpc.netty.NegotiationType; -import io.grpc.netty.NettyChannelBuilder; import java.io.IOException; import java.io.InputStream; import java.nio.file.NoSuchFileException; @@ -40,12 +39,6 @@ public final class ContentAddressableStorages { private static BuildfarmConfigs configs = BuildfarmConfigs.getInstance(); - private static Channel createChannel(String target) { - NettyChannelBuilder builder = - 
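
The Backplane additions above (worker start times, digest insert times, digest expiry refresh) are interface-only in this hunk; the Redis-backed implementation lives elsewhere in the change. A caller-side sketch (the generic parameters do not survive in this rendering of the diff, so Map<String, Long> is an assumption matching the epoch-seconds naming; backplane and blobDigest are assumed in scope):

Set<String> workers = backplane.getStorageWorkers();
Map<String, Long> startTimes = backplane.getWorkersStartTimeInEpochSecs(workers);
long insertEpochSecs = backplane.getDigestInsertTime(blobDigest);
backplane.updateDigestsExpiry(ImmutableList.of(blobDigest)); // refresh TTL after touching
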
NettyChannelBuilder.forTarget(target).negotiationType(NegotiationType.PLAINTEXT); - return builder.build(); - } - public static ContentAddressableStorage createGrpcCAS(Cas cas) { Channel channel = createChannel(cas.getTarget()); ByteStreamUploader byteStreamUploader = @@ -85,8 +78,8 @@ public static ContentAddressableStorage createFilesystemCAS(Cas config) config, configs.getMaxEntrySizeBytes(), DigestUtil.forHash("SHA256"), - /* expireService=*/ newDirectExecutorService(), - /* accessRecorder=*/ directExecutor()) { + /* expireService= */ newDirectExecutorService(), + /* accessRecorder= */ directExecutor()) { @Override protected InputStream newExternalInput( Compressor.Value compressor, Digest digest, long offset) throws IOException { diff --git a/src/main/java/build/buildfarm/cas/GrpcCAS.java b/src/main/java/build/buildfarm/cas/GrpcCAS.java index 5640962456..be660e9b7b 100644 --- a/src/main/java/build/buildfarm/cas/GrpcCAS.java +++ b/src/main/java/build/buildfarm/cas/GrpcCAS.java @@ -121,7 +121,7 @@ private InputStream newStreamInput(String resourceName, long offset) throws IOEx bsStub, NO_RETRIES::newBackoff, NO_RETRIES::isRetriable, - /* retryService=*/ null); + /* retryService= */ null); } private String readResourceName(Compressor.Value compressor, Digest digest) { @@ -237,7 +237,7 @@ public ListenableFuture> getAllFuture(Iterable digests) { @Override public Blob get(Digest digest) { try (InputStream in = - newStreamInput(readResourceName(Compressor.Value.IDENTITY, digest), /* offset=*/ 0)) { + newStreamInput(readResourceName(Compressor.Value.IDENTITY, digest), /* offset= */ 0)) { ByteString content = ByteString.readFrom(in); if (content.size() != digest.getSizeBytes()) { throw new IOException( @@ -282,7 +282,7 @@ public static Write newWrite( resourceName, Functions.identity(), digest.getSizeBytes(), - /* autoflush=*/ false); + /* autoflush= */ false); } @Override diff --git a/src/main/java/build/buildfarm/cas/MemoryCAS.java b/src/main/java/build/buildfarm/cas/MemoryCAS.java index 08ac8540c2..2fe77528e8 100644 --- a/src/main/java/build/buildfarm/cas/MemoryCAS.java +++ b/src/main/java/build/buildfarm/cas/MemoryCAS.java @@ -63,7 +63,7 @@ public class MemoryCAS implements ContentAddressableStorage { private final Writes writes = new Writes(this); public MemoryCAS(long maxSizeInBytes) { - this(maxSizeInBytes, (digest) -> {}, /* delegate=*/ null); + this(maxSizeInBytes, (digest) -> {}, /* delegate= */ null); } public MemoryCAS( @@ -281,7 +281,8 @@ private synchronized boolean add(Blob blob, Runnable onExpiration) { log.log( Level.WARNING, String.format( - "Out of nodes to remove, sizeInBytes = %d, maxSizeInBytes = %d, storage = %d, list = %d", + "Out of nodes to remove, sizeInBytes = %d, maxSizeInBytes = %d, storage = %d, list =" + + " %d", sizeInBytes, maxSizeInBytes, storage.size(), size())); } diff --git a/src/main/java/build/buildfarm/cas/cfc/CASFileCache.java b/src/main/java/build/buildfarm/cas/cfc/CASFileCache.java index b9c07ace16..fa4d595830 100644 --- a/src/main/java/build/buildfarm/cas/cfc/CASFileCache.java +++ b/src/main/java/build/buildfarm/cas/cfc/CASFileCache.java @@ -93,6 +93,7 @@ import io.grpc.StatusException; import io.grpc.StatusRuntimeException; import io.grpc.stub.ServerCallStreamObserver; +import io.netty.handler.codec.http.QueryStringDecoder; import io.prometheus.client.Counter; import io.prometheus.client.Gauge; import io.prometheus.client.Histogram; @@ -100,6 +101,8 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import 
java.net.URI; +import java.net.URISyntaxException; import java.nio.channels.ClosedByInterruptException; import java.nio.channels.ClosedChannelException; import java.nio.file.FileAlreadyExistsException; @@ -127,6 +130,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; import java.util.function.BooleanSupplier; import java.util.function.Consumer; import java.util.function.Supplier; @@ -145,7 +149,19 @@ public abstract class CASFileCache implements ContentAddressableStorage { Gauge.build().name("cas_size").help("CAS size.").register(); private static final Gauge casEntryCountMetric = Gauge.build().name("cas_entry_count").help("Number of entries in the CAS.").register(); - private static Histogram casTtl; + private static Histogram casTtl = + Histogram.build() + .name("cas_ttl_s") + .buckets( + 3600, // 1 hour + 21600, // 6 hours + 86400, // 1 day + 345600, // 4 days + 604800, // 1 week + 1210000 // 2 weeks + ) + .help("The amount of time CAS entries live on L1 storage before expiration (seconds)") + .register(); private static final Gauge casCopyFallbackMetric = Gauge.build() @@ -160,7 +176,6 @@ public abstract class CASFileCache implements ContentAddressableStorage { private final EntryPathStrategy entryPathStrategy; private final long maxSizeInBytes; private final long maxEntrySizeInBytes; - private final boolean publishTtlMetric; private final boolean execRootFallback; private final DigestUtil digestUtil; private final ConcurrentMap keyReferences; @@ -169,7 +184,7 @@ public abstract class CASFileCache implements ContentAddressableStorage { private final Consumer> onExpire; private final Executor accessRecorder; private final ExecutorService expireService; - private Thread prometheusMetricsThread; // TODO make this final, stop on shutdown + private Thread prometheusMetricsThread; private final Map directoryStorage = Maps.newConcurrentMap(); private final DirectoriesIndex directoriesIndex; @@ -177,6 +192,19 @@ public abstract class CASFileCache implements ContentAddressableStorage { private final LockMap locks = new LockMap(); @Nullable private final ContentAddressableStorage delegate; private final boolean delegateSkipLoad; + private final LoadingCache keyLocks = + CacheBuilder.newBuilder() + .expireAfterAccess( + 1, MINUTES) // hopefully long enough for any of our file ops to take place and prevent + // collision + .build( + new CacheLoader() { + @Override + public Lock load(String key) { + // should be sufficient for what we're doing + return new ReentrantLock(); + } + }); private final LoadingCache writes = CacheBuilder.newBuilder() .expireAfterAccess(1, HOURS) @@ -206,7 +234,7 @@ public Write load(BlobWriteKey key) { @Override public SettableFuture load(Digest digest) { SettableFuture future = SettableFuture.create(); - if (containsLocal(digest, /* result=*/ null, (key) -> {})) { + if (containsLocal(digest, /* result= */ null, (key) -> {})) { future.set(digest.getSizeBytes()); } return future; @@ -294,17 +322,16 @@ public CASFileCache( maxEntrySizeInBytes, config.getHexBucketLevels(), config.isFileDirectoriesIndexInMemory(), - config.isPublishTtlMetric(), config.isExecRootCopyFallback(), digestUtil, expireService, accessRecorder, - /* storage=*/ Maps.newConcurrentMap(), - /* directoriesIndexDbName=*/ DEFAULT_DIRECTORIES_INDEX_NAME, - /* onPut=*/ (digest) -> {}, - /* onExpire=*/ (digests) -> {}, - /* delegate=*/ null, - /* delegateSkipLoad=*/ false); 
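
The keyLocks field introduced above is a lock-striping idiom: a Guava LoadingCache hands out one ReentrantLock per CAS key on demand and evicts it a minute after last access, so idle keys do not pin locks forever. The diff's own comment concedes the window is only "hopefully long enough" for a file op, i.e. eviction mid-operation could mint a second lock for the same key. The idiom in isolation:

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

LoadingCache<String, Lock> keyLocks =
    CacheBuilder.newBuilder()
        .expireAfterAccess(1, TimeUnit.MINUTES)
        .build(CacheLoader.from(key -> new ReentrantLock()));

Lock lock = keyLocks.getUnchecked("some-cas-key"); // hypothetical key
lock.lock();
try {
  // mutate state owned by this key
} finally {
  lock.unlock();
}
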
+ /* storage= */ Maps.newConcurrentMap(), + /* directoriesIndexDbName= */ DEFAULT_DIRECTORIES_INDEX_NAME, + /* onPut= */ (digest) -> {}, + /* onExpire= */ (digests) -> {}, + /* delegate= */ null, + /* delegateSkipLoad= */ false); } public CASFileCache( @@ -313,7 +340,6 @@ public CASFileCache( long maxEntrySizeInBytes, int hexBucketLevels, boolean storeFileDirsIndexInMemory, - boolean publishTtlMetric, boolean execRootFallback, DigestUtil digestUtil, ExecutorService expireService, @@ -327,7 +353,6 @@ public CASFileCache( this.root = root; this.maxSizeInBytes = maxSizeInBytes; this.maxEntrySizeInBytes = maxEntrySizeInBytes; - this.publishTtlMetric = publishTtlMetric; this.execRootFallback = execRootFallback; this.digestUtil = digestUtil; this.expireService = expireService; @@ -339,21 +364,6 @@ public CASFileCache( this.delegateSkipLoad = delegateSkipLoad; this.directoriesIndexDbName = directoriesIndexDbName; this.keyReferences = Maps.newConcurrentMap(); - if (publishTtlMetric) { - casTtl = - Histogram.build() - .name("cas_ttl_s") - .buckets( - 3600, // 1 hour - 21600, // 6 hours - 86400, // 1 day - 345600, // 4 days - 604800, // 1 week - 1210000 // 2 weeks - ) - .help("The amount of time CAS entries live on L1 storage before expiration (seconds)") - .register(); - } entryPathStrategy = new HexBucketEntryPathStrategy(root, hexBucketLevels); @@ -538,7 +548,7 @@ private InputStream compressorInputStream(Compressor.Value compressor, InputStre @SuppressWarnings("ResultOfMethodCallIgnored") InputStream newLocalInput(Compressor.Value compressor, Digest digest, long offset) throws IOException { - log.log(Level.FINE, format("getting input stream for %s", DigestUtil.toString(digest))); + log.log(Level.FINER, format("getting input stream for %s", DigestUtil.toString(digest))); boolean isExecutable = false; do { String key = getKey(digest, isExecutable); @@ -591,7 +601,7 @@ public InputStream newInput(Compressor.Value compressor, Digest digest, long off @Override public Blob get(Digest digest) { - try (InputStream in = newInput(Compressor.Value.IDENTITY, digest, /* offset=*/ 0)) { + try (InputStream in = newInput(Compressor.Value.IDENTITY, digest, /* offset= */ 0)) { return new Blob(ByteString.readFrom(in), digest); } catch (NoSuchFileException e) { return null; @@ -602,6 +612,20 @@ public Blob get(Digest digest) { private static final int CHUNK_SIZE = 128 * 1024; + private static boolean shouldReadThrough(RequestMetadata requestMetadata) { + try { + URI uri = new URI(requestMetadata.getCorrelatedInvocationsId()); + QueryStringDecoder decoder = new QueryStringDecoder(uri); + return decoder + .parameters() + .getOrDefault("THROUGH", ImmutableList.of("false")) + .get(0) + .equals("true"); + } catch (URISyntaxException e) { + return false; + } + } + @Override public void get( Compressor.Value compressor, @@ -610,9 +634,28 @@ public void get( long count, ServerCallStreamObserver blobObserver, RequestMetadata requestMetadata) { + boolean readThrough = shouldReadThrough(requestMetadata); InputStream in; try { - in = newInput(compressor, digest, offset); + if (readThrough && !contains(digest, /* result= */ null)) { + // really need to be able to reuse/restart the same write over + // multiple requests - if we get successive read throughs for a single + // digest, we should pick up from where we were last time + // Also servers should affinitize + // And share data, so that they can pick the same worker to pull from + // if possible. 
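
shouldReadThrough() above decides read-through by treating the request's correlated invocations id as a URI and checking for a THROUGH=true query parameter with Netty's QueryStringDecoder; ids that fail URI parsing simply disable the behavior. A hedged illustration (the id value is hypothetical):

import com.google.common.collect.ImmutableList;
import io.netty.handler.codec.http.QueryStringDecoder;
import java.net.URI;

URI uri = new URI("https://ci.example.com/builds/123?THROUGH=true");
QueryStringDecoder decoder = new QueryStringDecoder(uri);
boolean readThrough =
    decoder.parameters()
        .getOrDefault("THROUGH", ImmutableList.of("false"))
        .get(0)
        .equals("true"); // true: serve a local miss by streaming from the external CAS
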
+ Write write = getWrite(compressor, digest, UUID.randomUUID(), requestMetadata); + blobObserver.setOnCancelHandler(write::reset); + in = + new ReadThroughInputStream( + newExternalInput(compressor, digest, 0), + localOffset -> newTransparentInput(compressor, digest, localOffset), + digest.getSizeBytes(), + offset, + write); + } else { + in = newInput(compressor, digest, offset); + } } catch (IOException e) { blobObserver.onError(e); return; @@ -712,7 +755,7 @@ void invalidateWrite(Digest digest) { public void put(Blob blob, Runnable onExpiration) throws InterruptedException { String key = getKey(blob.getDigest(), false); try { - log.log(Level.FINE, format("put: %s", key)); + log.log(Level.FINER, format("put: %s", key)); OutputStream out = putImpl( Compressor.Value.IDENTITY, @@ -720,9 +763,9 @@ public void put(Blob blob, Runnable onExpiration) throws InterruptedException { UUID.randomUUID(), () -> completeWrite(blob.getDigest()), blob.getDigest().getSizeBytes(), - /* isExecutable=*/ false, + /* isExecutable= */ false, () -> invalidateWrite(blob.getDigest()), - /* isReset=*/ true); + /* isReset= */ true); boolean referenced = out == null; try { if (out != null) { @@ -831,7 +874,7 @@ public void close() throws IOException { super.close(); } } finally { - onClosed.accept(/* cancelled=*/ false); + onClosed.accept(/* cancelled= */ false); } } @@ -840,17 +883,23 @@ public void cancel() throws IOException { try { out.cancel(); } finally { - onClosed.accept(/* cancelled=*/ true); + onClosed.accept(/* cancelled= */ true); } } } - Write newWrite(BlobWriteKey key, ListenableFuture future) { + Write newWrite(BlobWriteKey key, SettableFuture future) { Write write = new Write() { CancellableOutputStream out = null; + + @GuardedBy("this") boolean isReset = false; + + @GuardedBy("this") SettableFuture closedFuture = null; + + @GuardedBy("this") long fileCommittedSize = -1; @Override @@ -868,6 +917,9 @@ public synchronized void reset() { + key.getIdentifier(), e); } finally { + if (closedFuture != null) { + closedFuture.set(null); + } isReset = true; } } @@ -910,7 +962,7 @@ synchronized long getCommittedSizeFromOut() { public synchronized boolean isComplete() { return getFuture().isDone() || ((closedFuture == null || closedFuture.isDone()) - && containsLocal(key.getDigest(), /* result=*/ null, (key) -> {})); + && containsLocal(key.getDigest(), /* result= */ null, (key) -> {})); } @Override @@ -931,6 +983,11 @@ public synchronized ListenableFuture getOutputFuture( directExecutor()); } + private synchronized void syncCancelled() { + out = null; + isReset = true; + } + @Override public synchronized FeedbackOutputStream getOutput( long deadlineAfter, TimeUnit deadlineAfterUnits, Runnable onReadyHandler) @@ -939,6 +996,9 @@ public synchronized FeedbackOutputStream getOutput( // will block until it is returned via a close. 
if (closedFuture != null) { try { + while (!closedFuture.isDone()) { + wait(); + } closedFuture.get(); } catch (ExecutionException e) { throw new IOException(e.getCause()); @@ -947,26 +1007,34 @@ public synchronized FeedbackOutputStream getOutput( } } SettableFuture outClosedFuture = SettableFuture.create(); + Digest digest = key.getDigest(); UniqueWriteOutputStream uniqueOut = createUniqueWriteOutput( out, key.getCompressor(), - key.getDigest(), + digest, UUID.fromString(key.getIdentifier()), cancelled -> { if (cancelled) { - out = null; - isReset = true; + syncCancelled(); } outClosedFuture.set(null); }, this::isComplete, isReset); + if (uniqueOut.getPath() == null) { + // this is a duplicate output stream and the write is complete + future.set(key.getDigest().getSizeBytes()); + } commitOpenState(uniqueOut.delegate(), outClosedFuture); return uniqueOut; } - private void commitOpenState( + private synchronized void syncNotify() { + notify(); + } + + private synchronized void commitOpenState( CancellableOutputStream out, SettableFuture closedFuture) { // transition the Write to an open state, and modify all internal state required // atomically @@ -974,6 +1042,7 @@ private void commitOpenState( this.out = out; this.closedFuture = closedFuture; + closedFuture.addListener(this::syncNotify, directExecutor()); // they will likely write to this, so we can no longer assume isReset. // might want to subscribe to a write event on the stream isReset = false; @@ -1038,7 +1107,7 @@ CancellableOutputStream newOutput( String key = getKey(digest, false); final CancellableOutputStream cancellableOut; try { - log.log(Level.FINE, format("getWrite: %s", key)); + log.log(Level.FINER, format("getWrite: %s", key)); cancellableOut = putImpl( compressor, @@ -1046,7 +1115,7 @@ CancellableOutputStream newOutput( uuid, () -> completeWrite(digest), digest.getSizeBytes(), - /* isExecutable=*/ false, + /* isExecutable= */ false, () -> invalidateWrite(digest), isReset); } catch (InterruptedException e) { @@ -1095,14 +1164,14 @@ public void write(byte[] b, int off, int len) throws IOException { @Override public void cancel() throws IOException { - if (closed.compareAndSet(/* expected=*/ false, /* update=*/ true)) { + if (closed.compareAndSet(/* expected= */ false, /* update= */ true)) { cancellableOut.cancel(); } } @Override public void close() throws IOException { - if (closed.compareAndSet(/* expected=*/ false, /* update=*/ true)) { + if (closed.compareAndSet(/* expected= */ false, /* update= */ true)) { try { out.close(); decrementReference(key); @@ -1121,7 +1190,7 @@ public long getWrittenForClose() { @Override public void put(Blob blob) throws InterruptedException { - put(blob, /* onExpiration=*/ null); + put(blob, /* onExpiration= */ null); } @Override @@ -1148,7 +1217,7 @@ public void lock() { public void lockInterruptibly() throws InterruptedException { // attempt to atomically synchronize synchronized (locked) { - while (!locked.compareAndSet(/* expected=*/ false, /* update=*/ true)) { + while (!locked.compareAndSet(/* expected= */ false, /* update= */ true)) { locked.wait(); } } @@ -1175,7 +1244,7 @@ public boolean tryLock(long time, TimeUnit unit) { @Override public void unlock() { - if (!locked.compareAndSet(/* expected=*/ true, /* update=*/ false)) { + if (!locked.compareAndSet(/* expected= */ true, /* update= */ false)) { throw new IllegalMonitorStateException("the lock was not held"); } synchronized (locked) { @@ -1246,6 +1315,13 @@ public void initializeRootDirectory() throws IOException { fileStore = 
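
The getOutput() change above no longer trusts closedFuture.get() alone; it loops on a monitor wait that syncNotify() releases, and commitOpenState() registers syncNotify as a directExecutor listener, so completion of the prior stream's closedFuture wakes the blocked writer (the synchronized Write instance itself is the monitor). The idiom reduced to essentials:

import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;

SettableFuture<Void> closedFuture = SettableFuture.create();
Object monitor = new Object(); // stands in for the synchronized Write instance
closedFuture.addListener(
    () -> { synchronized (monitor) { monitor.notify(); } },
    MoreExecutors.directExecutor());
synchronized (monitor) {
  while (!closedFuture.isDone()) {
    monitor.wait(); // guarded wait: re-check the condition after every wakeup
  }
}
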
Files.getFileStore(root); } + public void stop() throws InterruptedException { + if (prometheusMetricsThread != null) { + prometheusMetricsThread.interrupt(); + prometheusMetricsThread.join(); + } + } + public StartupCacheResults start(boolean skipLoad) throws IOException, InterruptedException { return start(newDirectExecutorService(), skipLoad); } @@ -1277,7 +1353,8 @@ public StartupCacheResults start( loadResults = loadCache(onStartPut, removeDirectoryService); } else { // Skip loading the cache and ensure it is empty - Directories.remove(root, removeDirectoryService); + fileStore = Files.getFileStore(root); + Directories.remove(root, fileStore, removeDirectoryService); initializeRootDirectory(); } @@ -1298,7 +1375,7 @@ public StartupCacheResults start( try { casSizeMetric.set(size()); casEntryCountMetric.set(entryCount()); - TimeUnit.MINUTES.sleep(5); + MINUTES.sleep(5); } catch (InterruptedException e) { Thread.currentThread().interrupt(); break; @@ -1342,7 +1419,7 @@ private void deleteInvalidFileContent(List files, ExecutorService removeDi try { for (Path path : files) { if (Files.isDirectory(path)) { - Directories.remove(path, removeDirectoryService); + Directories.remove(path, fileStore, removeDirectoryService); } else { Files.delete(path); } @@ -1606,7 +1683,7 @@ private void joinThreads(ExecutorService pool, String message) throws Interrupte pool.shutdown(); while (!pool.isTerminated()) { log.log(Level.INFO, message); - pool.awaitTermination(1, TimeUnit.MINUTES); + pool.awaitTermination(1, MINUTES); } } @@ -1688,6 +1765,10 @@ public Path getPath(String filename) { return entryPathStrategy.getPath(filename); } + public Path getRemovingPath(String filename) { + return entryPathStrategy.getPath(filename + "_removed"); + } + private synchronized void dischargeAndNotify(long size) { discharge(size); notify(); @@ -1769,7 +1850,8 @@ private Entry waitForLastUnreferencedEntry(long blobSizeInBytes) throws Interrup log.log( Level.INFO, format( - "CASFileCache::expireEntry(%d) unreferenced list is empty, %d bytes, %d keys with %d references, min(%d, %s), max(%d, %s)", + "CASFileCache::expireEntry(%d) unreferenced list is empty, %d bytes, %d keys with %d" + + " references, min(%d, %s), max(%d, %s)", blobSizeInBytes, sizeInBytes, keys, references, min, minkey, max, maxkey)); wait(); if (sizeInBytes <= maxSizeInBytes) { @@ -1876,6 +1958,55 @@ private static boolean causedByInterrupted(Exception e) { || e instanceof ClosedByInterruptException; } + private Entry safeStorageInsertion(String key, Entry entry) { + Lock lock; + try { + lock = keyLocks.get(key); + } catch (ExecutionException e) { + // impossible without exception instantiating lock + throw new RuntimeException(e.getCause()); + } + + lock.lock(); + try { + return storage.putIfAbsent(key, entry); + } finally { + lock.unlock(); + } + } + + private Entry safeStorageRemoval(String key) throws IOException { + Path path = getPath(key); + Path expiredPath = getRemovingPath(key); + boolean deleteExpiredPath = false; + + Lock lock; + try { + lock = keyLocks.get(key); + } catch (ExecutionException e) { + // impossible without exception instantiating lock + throw new IOException(e); + } + + lock.lock(); + try { + Files.createLink(expiredPath, path); + deleteExpiredPath = true; + Files.delete(path); + deleteExpiredPath = false; + return storage.remove(key); + } finally { + if (deleteExpiredPath) { + try { + Files.delete(expiredPath); + } catch (IOException e) { + log.log(Level.SEVERE, "error cleaning up after failed safeStorageRemoval", e); + } + 
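
safeStorageRemoval() above frees the canonical key path before the storage entry disappears: under the per-key lock it hard-links the file to its "<key>_removed" name (getRemovingPath), deletes the original, and only then drops the map entry, so a concurrent insert of the same key never races the deletion. The filesystem primitive, with hypothetical paths:

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

Path path = Paths.get("/cas/cache/abcd1234");
Path expiredPath = Paths.get("/cas/cache/abcd1234_removed");
Files.createLink(expiredPath, path); // second directory entry for the same inode
Files.delete(path);                  // the key name is immediately reusable
// deleteExpiredKey(key) later records expiredPath's mtime for the TTL metric,
// then deletes it.
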
} + lock.unlock(); + } + } + private int getLockedReferenceCount(Entry e) { synchronized (this) { Integer keyCt = keyReferences.get(e.key); @@ -1899,7 +2030,8 @@ private ListenableFuture expireEntry(long blobSizeInBytes, ExecutorServic e = waitForLastUnreferencedEntry(blobSizeInBytes)) { if (e.referenceCount != 0) { throw new IllegalStateException( - "ERROR: Reference counts lru ordering has not been maintained correctly, attempting to expire referenced (or negatively counted) content " + "ERROR: Reference counts lru ordering has not been maintained correctly, attempting to" + + " expire referenced (or negatively counted) content " + e.key + " with " + e.referenceCount @@ -1911,7 +2043,7 @@ private ListenableFuture expireEntry(long blobSizeInBytes, ExecutorServic } catch (IOException ioEx) { interrupted = causedByInterrupted(ioEx); } - Entry removedEntry = storage.remove(e.key); + Entry removedEntry = safeStorageRemoval(e.key); // reference compare on purpose if (removedEntry == e) { ListenableFuture entryFuture = dischargeEntryFuture(e, service); @@ -1958,7 +2090,7 @@ private ListenableFuture expireDirectory(Digest digest, ExecutorService se return immediateFuture(null); } - return Directories.remove(getDirectoryPath(digest), service); + return Directories.remove(getDirectoryPath(digest), fileStore, service); } @SuppressWarnings("ConstantConditions") @@ -2091,8 +2223,8 @@ private void removeFilePath(Path path) throws IOException { } if (Files.isDirectory(temp)) { - log.log(Level.INFO, "removing existing directory " + path + " for fetch"); - Directories.remove(temp); + log.log(Level.FINER, "removing existing directory " + path + " for fetch"); + Directories.remove(temp, fileStore); } else { Files.delete(temp); } @@ -2228,23 +2360,23 @@ private void getDirectoryKeys( } } - public ListenableFuture putDirectory( + public ListenableFuture putDirectory( Digest digest, Map directoriesIndex, ExecutorService service) { // Claim lock. // Claim the directory path so no other threads try to create/delete it. Path path = getDirectoryPath(digest); Lock l = locks.acquire(path); - log.log(Level.FINE, format("locking directory %s", path.getFileName())); + log.log(Level.FINER, format("locking directory %s", path.getFileName())); try { l.lockInterruptibly(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); return immediateFailedFuture(e); } - log.log(Level.FINE, format("locked directory %s", path.getFileName())); + log.log(Level.FINER, format("locked directory %s", path.getFileName())); // Now that a lock has been claimed, we can proceed to create the directory. - ListenableFuture putFuture; + ListenableFuture putFuture; try { putFuture = putDirectorySynchronized(path, digest, directoriesIndex, service); } catch (IOException e) { @@ -2255,7 +2387,7 @@ public ListenableFuture putDirectory( putFuture.addListener( () -> { l.unlock(); - log.log(Level.FINE, format("directory %s has been unlocked", path.getFileName())); + log.log(Level.FINER, format("directory %s has been unlocked", path.getFileName())); }, service); return putFuture; @@ -2299,44 +2431,29 @@ private boolean directoryEntryExists( return false; } - static class PutDirectoryException extends IOException { + public static class PathResult { private final Path path; - private final Digest digest; - private final List exceptions; + private final boolean missed; - PutDirectoryException(Path path, Digest digest, List exceptions) { - // When printing the exception, show the captured sub-exceptions. 
- super(getErrorMessage(path, exceptions)); + public PathResult(Path path, boolean missed) { this.path = path; - this.digest = digest; - this.exceptions = exceptions; - for (Throwable exception : exceptions) { - addSuppressed(exception); - } + this.missed = missed; } - Path getPath() { + public Path getPath() { return path; } - Digest getDigest() { - return digest; - } - - List getExceptions() { - return exceptions; + public boolean getMissed() { + return missed; } } - private static String getErrorMessage(Path path, List exceptions) { - return String.format("%s: %d %s: %s", path, exceptions.size(), "exceptions", exceptions); - } - @SuppressWarnings("ConstantConditions") - private ListenableFuture putDirectorySynchronized( + private ListenableFuture putDirectorySynchronized( Path path, Digest digest, Map directoriesByDigest, ExecutorService service) throws IOException { - log.log(Level.FINE, format("directory %s has been locked", path.getFileName())); + log.log(Level.FINER, format("directory %s has been locked", path.getFileName())); ListenableFuture expireFuture; synchronized (this) { DirectoryEntry e = directoryStorage.get(digest); @@ -2350,7 +2467,8 @@ private ListenableFuture putDirectorySynchronized( log.log( Level.SEVERE, format( - "CASFileCache::putDirectory(%s) exists, but input %s does not, purging it with fire and resorting to fetch", + "CASFileCache::putDirectory(%s) exists, but input %s does not, purging it with" + + " fire and resorting to fetch", DigestUtil.toString(digest), input)); e = null; break; @@ -2363,20 +2481,21 @@ private ListenableFuture putDirectorySynchronized( } if (e != null) { - log.log(Level.FINE, format("found existing entry for %s", path.getFileName())); + log.log(Level.FINER, format("found existing entry for %s", path.getFileName())); if (directoryEntryExists(path, e, directoriesByDigest)) { - return immediateFuture(path); + return immediateFuture(new PathResult(path, /* missed= */ false)); } log.log( Level.SEVERE, format( - "directory %s does not exist in cache, purging it with fire and resorting to fetch", + "directory %s does not exist in cache, purging it with fire and resorting to" + + " fetch", path.getFileName())); } decrementReferencesSynchronized(inputsBuilder.build(), ImmutableList.of()); expireFuture = expireDirectory(digest, service); - log.log(Level.FINE, format("expiring existing entry for %s", path.getFileName())); + log.log(Level.FINER, format("expiring existing entry for %s", path.getFileName())); } } @@ -2398,7 +2517,7 @@ private ListenableFuture putDirectorySynchronized( transformAsync( deindexFuture, result -> { - log.log(Level.FINE, format("expiry complete, fetching %s", path.getFileName())); + log.log(Level.FINER, format("expiry complete, fetching %s", path.getFileName())); ImmutableList.Builder> putFuturesBuilder = ImmutableList.builder(); fetchDirectory( @@ -2419,7 +2538,10 @@ private ListenableFuture putDirectorySynchronized( try { putFutures.get(i).get(); // should never get here + } catch (ExecutionException e) { + failures.add(e.getCause()); } catch (Throwable t) { + // cancelled or interrupted during get failures.add(t); } } @@ -2439,7 +2561,7 @@ private ListenableFuture putDirectorySynchronized( fetchFuture, (result) -> { try { - disableAllWriteAccess(path); + disableAllWriteAccess(path, fileStore); } catch (IOException e) { log.log(Level.SEVERE, "error while disabling write permissions on " + path, e); return immediateFailedFuture(e); @@ -2469,8 +2591,8 @@ private ListenableFuture putDirectorySynchronized( } } try { - 
log.log(Level.FINE, "removing directory to roll back " + path); - Directories.remove(path); + log.log(Level.FINER, "removing directory to roll back " + path); + Directories.remove(path, fileStore); } catch (IOException removeException) { log.log( Level.SEVERE, @@ -2485,7 +2607,8 @@ private ListenableFuture putDirectorySynchronized( return transform( rollbackFuture, (results) -> { - log.log(Level.FINE, format("directory fetch complete, inserting %s", path.getFileName())); + log.log( + Level.FINER, format("directory fetch complete, inserting %s", path.getFileName())); DirectoryEntry e = new DirectoryEntry( // might want to have this treatment ahead of this @@ -2494,7 +2617,7 @@ private ListenableFuture putDirectorySynchronized( : directoriesByDigest.get(digest), Deadline.after(10, SECONDS)); directoryStorage.put(digest, e); - return path; + return new PathResult(path, /* missed= */ true); }, service); } @@ -2529,7 +2652,7 @@ Path putAndCopy(Digest digest, boolean isExecutable) throws IOException, Interru digest.getSizeBytes(), isExecutable, () -> invalidateWrite(digest), - /* isReset=*/ true); + /* isReset= */ true); if (out != null) { boolean complete = false; try { @@ -2537,13 +2660,13 @@ Path putAndCopy(Digest digest, boolean isExecutable) throws IOException, Interru complete = true; } finally { try { - log.log(Level.FINE, format("closing output stream for %s", DigestUtil.toString(digest))); + log.log(Level.FINER, format("closing output stream for %s", DigestUtil.toString(digest))); if (complete) { out.close(); } else { out.cancel(); } - log.log(Level.FINE, format("output stream closed for %s", DigestUtil.toString(digest))); + log.log(Level.FINER, format("output stream closed for %s", DigestUtil.toString(digest))); } catch (IOException e) { if (Thread.interrupted()) { log.log( @@ -2555,7 +2678,7 @@ Path putAndCopy(Digest digest, boolean isExecutable) throws IOException, Interru throw new InterruptedException(); } else { log.log( - Level.FINE, + Level.FINER, format("failed output stream close for %s", DigestUtil.toString(digest)), e); } @@ -2587,7 +2710,7 @@ private static Exception extractStatusException(IOException e) { private void copyExternalInput(Digest digest, CancellableOutputStream out) throws IOException, InterruptedException { Retrier retrier = new Retrier(Backoff.sequential(5), Retrier.DEFAULT_IS_RETRIABLE); - log.log(Level.FINE, format("downloading %s", DigestUtil.toString(digest))); + log.log(Level.FINER, format("downloading %s", DigestUtil.toString(digest))); try { retrier.execute( () -> { @@ -2608,7 +2731,7 @@ private void copyExternalInput(Digest digest, CancellableOutputStream out) e); // prevent burial by early end of stream during close throw e; } - log.log(Level.FINE, format("download of %s complete", DigestUtil.toString(digest))); + log.log(Level.FINER, format("download of %s complete", DigestUtil.toString(digest))); } @FunctionalInterface @@ -2662,7 +2785,7 @@ private CancellableOutputStream putImpl( if (out == DUPLICATE_OUTPUT_STREAM) { return null; } - log.log(Level.FINE, format("entry %s is missing, downloading and populating", key)); + log.log(Level.FINER, format("entry %s is missing, downloading and populating", key)); return newCancellableOutputStream(out); } @@ -2821,25 +2944,20 @@ private final void renamePath(Path a, Path b) throws IOException, FileAlreadyExi } } - private void deleteExpiredKey(Path path) throws IOException { - // We don't want publishing the metric to delay the deletion of the file. 
- // We publish the metric only after the file has been deleted. - long createdTime = 0; - if (publishTtlMetric) { - createdTime = path.toFile().lastModified(); - } + private void deleteExpiredKey(String key) throws IOException { + Path path = getRemovingPath(key); + long createdTimeMs = Files.getLastModifiedTime(path).to(MILLISECONDS); deleteFilePath(path); - if (publishTtlMetric) { - publishExpirationMetric(createdTime); - } + publishExpirationMetric(createdTimeMs); } - private void publishExpirationMetric(long createdTime) { - long currentTime = new Date().getTime(); - long ttl = currentTime - createdTime; - casTtl.observe(Time.millisecondsToSeconds(ttl)); + private void publishExpirationMetric(long createdTimeMs) { + // TODO introduce ttl clock + long currentTimeMs = new Date().getTime(); + long ttlMs = currentTimeMs - createdTimeMs; + casTtl.observe(Time.millisecondsToSeconds(ttlMs)); } @SuppressWarnings({"ConstantConditions", "ResultOfMethodCallIgnored"}) @@ -2874,8 +2992,7 @@ private boolean charge(String key, long blobSizeInBytes, AtomicBoolean requiresD "CASFileCache::putImpl ignore deletion for %s expiration due to key reference", expiredKey)); } else { - Path path = getPath(expiredKey); - deleteExpiredKey(path); + deleteExpiredKey(expiredKey); } } catch (NoSuchFileException eNoEnt) { log.log( @@ -2892,7 +3009,7 @@ private boolean charge(String key, long blobSizeInBytes, AtomicBoolean requiresD return immediateFuture(null); } expiredKeyCounter.inc(); - log.log(Level.INFO, format("expired key %s", expiredKey)); + log.log(Level.FINE, format("expired key %s", expiredKey)); return immediateFuture(fileEntryKey.getDigest()); }, expireService)); @@ -3092,13 +3209,14 @@ void commit() throws IOException { Entry existingEntry = null; boolean inserted = false; try { + // acquire the key lock log.log(Level.FINEST, "comitting " + key + " from " + writePath); Path cachePath = CASFileCache.this.getPath(key); CASFileCache.this.renamePath(writePath, cachePath); - existingEntry = storage.putIfAbsent(key, entry); + existingEntry = safeStorageInsertion(key, entry); inserted = existingEntry == null; } catch (FileAlreadyExistsException e) { - log.log(Level.FINE, "file already exists for " + key + ", nonexistent entry will fail"); + log.log(Level.FINER, "file already exists for " + key + ", nonexistent entry will fail"); } finally { if (Files.exists(writePath)) { Files.delete(writePath); @@ -3125,20 +3243,20 @@ void commit() throws IOException { } if (existingEntry != null) { - log.log(Level.FINE, "lost the race to insert " + key); + log.log(Level.FINER, "lost the race to insert " + key); if (!referenceIfExists(key)) { // we would lose our accountability and have a presumed reference if we returned throw new IllegalStateException("storage conflict with existing key for " + key); } } else if (writeWinner.get()) { - log.log(Level.FINE, "won the race to insert " + key); + log.log(Level.FINER, "won the race to insert " + key); try { onInsert.run(); } catch (RuntimeException e) { throw new IOException(e); } } else { - log.log(Level.FINE, "did not win the race to insert " + key); + log.log(Level.FINER, "did not win the race to insert " + key); } } }; @@ -3192,7 +3310,7 @@ public boolean incrementReference() { "entry " + key + " has " + referenceCount + " references and is being incremented..."); } log.log( - Level.FINER, + Level.FINEST, "incrementing references to " + key + " from " @@ -3222,7 +3340,7 @@ public boolean decrementReference(Entry header) { "entry " + key + " has 0 references and is being 
decremented..."); } log.log( - Level.FINER, + Level.FINEST, "decrementing references to " + key + " from " diff --git a/src/main/java/build/buildfarm/cas/cfc/CasFallbackDelegate.java b/src/main/java/build/buildfarm/cas/cfc/CasFallbackDelegate.java index 5404e6383b..35cf2c2ad5 100644 --- a/src/main/java/build/buildfarm/cas/cfc/CasFallbackDelegate.java +++ b/src/main/java/build/buildfarm/cas/cfc/CasFallbackDelegate.java @@ -52,6 +52,7 @@ public static void start( fileCacheDelegate.start(onStartPut, removeDirectoryService, skipLoad); } } + /** * @brief Get an inputstream to read the given digest data. * @details Creates the delegate's inputstream if possible. @@ -74,6 +75,7 @@ public static InputStream newInput( } return delegate.newInput(compressor, digest, offset); } + /** * @brief Query delegate CAS to find missing blobs. * @details Will not query delegate if there are no missing blobs given. @@ -92,6 +94,7 @@ public static Iterable findMissingBlobs( return delegate.findMissingBlobs(missingDigests); } + /** * @brief Check if delegate CAS contains blob. * @details Will be false if CAS delegate is unavailable. diff --git a/src/main/java/build/buildfarm/cas/cfc/PutDirectoryException.java b/src/main/java/build/buildfarm/cas/cfc/PutDirectoryException.java new file mode 100644 index 0000000000..d635a7217b --- /dev/null +++ b/src/main/java/build/buildfarm/cas/cfc/PutDirectoryException.java @@ -0,0 +1,53 @@ +// Copyright 2023 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package build.buildfarm.cas.cfc; + +import build.bazel.remote.execution.v2.Digest; +import java.io.IOException; +import java.nio.file.Path; +import java.util.List; + +public class PutDirectoryException extends IOException { + private final Path path; + private final Digest digest; + private final List exceptions; + + private static String getErrorMessage(Path path, List exceptions) { + return String.format("%s: %d %s: %s", path, exceptions.size(), "exceptions", exceptions); + } + + public PutDirectoryException(Path path, Digest digest, List exceptions) { + // When printing the exception, show the captured sub-exceptions. 
+ super(getErrorMessage(path, exceptions)); + this.path = path; + this.digest = digest; + this.exceptions = exceptions; + for (Throwable exception : exceptions) { + addSuppressed(exception); + } + } + + Path getPath() { + return path; + } + + public Digest getDigest() { + return digest; + } + + public List getExceptions() { + return exceptions; + } +} diff --git a/src/main/java/build/buildfarm/common/Actions.java b/src/main/java/build/buildfarm/common/Actions.java index ab3c3d8432..d6d1f18ba0 100644 --- a/src/main/java/build/buildfarm/common/Actions.java +++ b/src/main/java/build/buildfarm/common/Actions.java @@ -71,7 +71,12 @@ public static Status asExecutionStatus(Throwable t) { status.setCode(grpcStatus.getCode().value()); } - return status.setMessage(t.getMessage()).build(); + String message = t.getMessage(); + if (message != null) { + status.setMessage(message); + } + + return status.build(); } public static boolean isRetriable(Status status) { diff --git a/src/main/java/build/buildfarm/common/BUILD b/src/main/java/build/buildfarm/common/BUILD index 9cc8779ec2..b639ee4478 100644 --- a/src/main/java/build/buildfarm/common/BUILD +++ b/src/main/java/build/buildfarm/common/BUILD @@ -1,11 +1,14 @@ java_library( name = "common", - srcs = glob([ - "*.java", - "function/*.java", - "io/*.java", - "net/*.java", - ]), + srcs = glob( + [ + "*.java", + "function/*.java", + "io/*.java", + "net/*.java", + ], + exclude = ["BuildfarmExecutors.java"], + ), plugins = [":lombok"], visibility = ["//visibility:public"], deps = [ @@ -13,10 +16,8 @@ java_library( "//src/main/java/build/buildfarm/common/resources", "//src/main/java/build/buildfarm/common/resources:resource_java_proto", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", - "//third_party/jedis", - "@googleapis//:google_longrunning_operations_java_proto", - "@googleapis//:google_rpc_code_java_proto", - "@googleapis//:google_rpc_error_details_java_proto", + "@com_google_googleapis//google/longrunning:longrunning_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_github_jnr_jnr_constants", "@maven//:com_github_jnr_jnr_ffi", "@maven//:com_github_jnr_jnr_posix", @@ -35,7 +36,46 @@ java_library( "@maven//:org_apache_commons_commons_compress", "@maven//:org_projectlombok_lombok", "@maven//:org_threeten_threetenbp", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@maven//:redis_clients_jedis", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", + ], +) + +java_library( + name = "BuildfarmExecutors", + srcs = [ + "BuildfarmExecutors.java", + ], + plugins = [":lombok"], + visibility = ["//visibility:public"], + deps = [ + "//src/main/java/build/buildfarm/common", + "//src/main/java/build/buildfarm/common/config", + "//src/main/java/build/buildfarm/common/resources", + "//src/main/java/build/buildfarm/common/resources:resource_java_proto", + "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", + "@com_google_googleapis//google/longrunning:longrunning_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", + "@maven//:com_github_jnr_jnr_constants", + "@maven//:com_github_jnr_jnr_ffi", + "@maven//:com_github_jnr_jnr_posix", + "@maven//:com_github_luben_zstd_jni", + "@maven//:com_github_oshi_oshi_core", + "@maven//:com_google_code_findbugs_jsr305", + "@maven//:com_google_guava_failureaccess", + "@maven//:com_google_guava_guava", + "@maven//:com_google_protobuf_protobuf_java", + 
"@maven//:com_google_protobuf_protobuf_java_util", + "@maven//:commons_io_commons_io", + "@maven//:io_grpc_grpc_api", + "@maven//:io_grpc_grpc_context", + "@maven//:io_grpc_grpc_protobuf", + "@maven//:io_prometheus_simpleclient", + "@maven//:org_apache_commons_commons_compress", + "@maven//:org_projectlombok_lombok", + "@maven//:org_threeten_threetenbp", + "@maven//:redis_clients_jedis", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) diff --git a/src/main/java/build/buildfarm/common/BuildfarmExecutors.java b/src/main/java/build/buildfarm/common/BuildfarmExecutors.java index 844c4366d1..f685ca5118 100644 --- a/src/main/java/build/buildfarm/common/BuildfarmExecutors.java +++ b/src/main/java/build/buildfarm/common/BuildfarmExecutors.java @@ -59,6 +59,12 @@ public static ExecutorService getSubscriberPool() { nThreads, new ThreadFactoryBuilder().setNameFormat(threadNameFormat).build()); } + public static ExecutorService getDequeuePool() { + String threadNameFormat = "dequeue-pool-%d"; + return Executors.newCachedThreadPool( + new ThreadFactoryBuilder().setNameFormat(threadNameFormat).build()); + } + public static ListeningExecutorService getTransformServicePool() { int nThreads = 24; String threadNameFormat = "transform-service-pool-%d"; diff --git a/src/main/java/build/buildfarm/common/CommandUtils.java b/src/main/java/build/buildfarm/common/CommandUtils.java index c6eeb150d2..1604743629 100644 --- a/src/main/java/build/buildfarm/common/CommandUtils.java +++ b/src/main/java/build/buildfarm/common/CommandUtils.java @@ -46,7 +46,7 @@ public static boolean isTest(Command command) { * @return The list of output paths. * @note Suggested return identifier: output_paths. */ - public static List getResolvedOutputPaths(Command command, Path actionRoot) { + public static List getResolvedOutputPaths(Command command, Path workingDirectory) { // REAPI clients previously needed to specify whether the output path was a directory or file. // This turned out to be too restrictive-- some build tools don't know what an action produces // until it is done. @@ -65,7 +65,7 @@ public static List getResolvedOutputPaths(Command command, Path actionRoot // `output_directories` will be ignored!" if (command.getOutputPathsCount() != 0) { for (String outputPath : command.getOutputPathsList()) { - resolvedPaths.add(actionRoot.resolve(outputPath)); + resolvedPaths.add(workingDirectory.resolve(outputPath)); } return resolvedPaths; } @@ -73,10 +73,10 @@ public static List getResolvedOutputPaths(Command command, Path actionRoot // Assuming `output_paths` was not used, // fetch deprecated `output_files` and `output_directories` for backwards compatibility. 
for (String outputPath : command.getOutputFilesList()) { - resolvedPaths.add(actionRoot.resolve(outputPath)); + resolvedPaths.add(workingDirectory.resolve(outputPath)); } for (String outputPath : command.getOutputDirectoriesList()) { - resolvedPaths.add(actionRoot.resolve(outputPath)); + resolvedPaths.add(workingDirectory.resolve(outputPath)); } return resolvedPaths; diff --git a/src/main/java/build/buildfarm/common/Errors.java b/src/main/java/build/buildfarm/common/Errors.java index 24932b994e..603c015d23 100644 --- a/src/main/java/build/buildfarm/common/Errors.java +++ b/src/main/java/build/buildfarm/common/Errors.java @@ -19,5 +19,8 @@ public final class Errors { public static final String VIOLATION_TYPE_INVALID = "INVALID"; + public static final String MISSING_INPUT = + "A requested input (or the `Action` or its `Command`) was not found in the CAS."; + private Errors() {} } diff --git a/src/main/java/build/buildfarm/common/ExecutionProperties.java b/src/main/java/build/buildfarm/common/ExecutionProperties.java index 8126bd607a..198783d21b 100644 --- a/src/main/java/build/buildfarm/common/ExecutionProperties.java +++ b/src/main/java/build/buildfarm/common/ExecutionProperties.java @@ -293,4 +293,25 @@ public class ExecutionProperties { * operation queue). */ public static final String POOL = "Pool"; + + /** + * @field WORKER + * @brief The exec_property to ensure that the action runs only on the worker with the given name. + * @details Useful for diagnosing worker issues by targeting builds to a specific worker. + */ + public static final String WORKER = "Worker"; + + /** + * @field PERSISTENT_WORKER_KEY + * @brief Hash of tool inputs from --experimental_remote_mark_tool_inputs + * @details See https://github.com/bazelbuild/bazel/issues/10091 + */ + public static final String PERSISTENT_WORKER_KEY = "persistentWorkerKey"; + + /** + * @field PERSISTENT_WORKER_COMMAND + * @brief Command string to start the persistent worker + * @details See https://github.com/bazelbuild/bazel/issues/10091 + */ + public static final String PERSISTENT_WORKER_COMMAND = "persistentWorkerCommand"; } diff --git a/src/main/java/build/buildfarm/common/LoggingMain.java b/src/main/java/build/buildfarm/common/LoggingMain.java index 9a4d9bd2b0..605f3ca3ef 100644 --- a/src/main/java/build/buildfarm/common/LoggingMain.java +++ b/src/main/java/build/buildfarm/common/LoggingMain.java @@ -7,24 +7,22 @@ public abstract class LoggingMain { protected abstract void onShutdown() throws InterruptedException; - class ShutdownThread extends Thread { - ShutdownThread(String applicationName) { - super(null, null, applicationName + "-Shutdown", 0); - } - - @Override - public void run() { - try { - LoggingMain.this.onShutdown(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } finally { - WaitingLogManager.release(); - } + private void shutdown() { + try { + onShutdown(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } finally { + WaitingLogManager.release(); } } protected LoggingMain(String applicationName) { - Runtime.getRuntime().addShutdownHook(new ShutdownThread(applicationName)); + Runtime.getRuntime() .addShutdownHook( + new Thread( + /* group= */ null, + /* target= */ this::shutdown, + /* name= */ applicationName + "-Shutdown")); } } diff --git a/src/main/java/build/buildfarm/common/OperationFailer.java b/src/main/java/build/buildfarm/common/OperationFailer.java index ca50fe59b1..cb52cc3c90 100644 --- a/src/main/java/build/buildfarm/common/OperationFailer.java +++
b/src/main/java/build/buildfarm/common/OperationFailer.java @@ -20,10 +20,9 @@ import build.buildfarm.v1test.ExecuteEntry; import com.google.longrunning.Operation; import com.google.protobuf.Any; -import com.google.rpc.PreconditionFailure; -import io.grpc.Status.Code; -import java.net.InetAddress; +import com.google.rpc.Status; import com.google.common.base.Strings; +import java.net.InetAddress; /** * @class OperationFailer @@ -32,35 +31,27 @@ * finished and failed. */ public class OperationFailer { - // Not great - consider using publicName if we upstream private static String hostname = null; private static String getHostname() { - if (!Strings.isNullOrEmpty(hostname)) { + if (!Strings.isNullOrEmpty(hostname)) { + return hostname; + } + try { + hostname = InetAddress.getLocalHost().getHostName(); + } catch (Exception e) { + hostname = "_unknown_host_"; + } return hostname; - } - try { - hostname = InetAddress.getLocalHost().getHostName(); - } catch (Exception e) { - hostname = "_unknown_host_"; - } - return hostname; } - public static Operation get( - Operation operation, - ExecuteEntry executeEntry, - String failureType, - String failureMessage, - String failureDetails) { - return operation - .toBuilder() - .setName(executeEntry.getOperationName()) + public static Operation get(Operation operation, ExecuteEntry executeEntry, Status status) { + return operation.toBuilder() .setDone(true) + .setName(executeEntry.getOperationName()) .setMetadata( Any.pack(executeOperationMetadata(executeEntry, ExecutionStage.Value.COMPLETED))) - .setResponse( - Any.pack(failResponse(executeEntry, failureType, failureMessage, failureDetails))) + .setResponse(Any.pack(ExecuteResponse.newBuilder().setStatus(status).build())) .build(); } @@ -73,24 +64,4 @@ private static ExecuteOperationMetadata executeOperationMetadata( .setStage(stage) .build(); } - - private static ExecuteResponse failResponse( - ExecuteEntry executeEntry, String failureType, String failureMessage, String failureDetails) { - PreconditionFailure.Builder preconditionFailureBuilder = PreconditionFailure.newBuilder(); - preconditionFailureBuilder - .addViolationsBuilder() - .setType(failureType) - .setSubject(String.format("[%s] %s", OperationFailer.getHostname(), "blobs/" + DigestUtil.toString(executeEntry.getActionDigest()))) - .setDescription(String.format("[%s] %s", OperationFailer.getHostname(), failureDetails)); - PreconditionFailure preconditionFailure = preconditionFailureBuilder.build(); - - return ExecuteResponse.newBuilder() - .setStatus( - com.google.rpc.Status.newBuilder() - .setCode(Code.FAILED_PRECONDITION.value()) - .setMessage(failureMessage) - .addDetails(Any.pack(preconditionFailure)) - .build()) - .build(); - } } diff --git a/src/main/java/build/buildfarm/common/Queue.java b/src/main/java/build/buildfarm/common/Queue.java new file mode 100644 index 0000000000..0fc9573e90 --- /dev/null +++ b/src/main/java/build/buildfarm/common/Queue.java @@ -0,0 +1,26 @@ +package build.buildfarm.common; + +import java.time.Duration; + +public interface Queue { + // java.util.BlockingQueue-ish + E take(Duration timeout) throws InterruptedException; + + // java.util.Queue + E poll(); + + boolean offer(E e); + + // our special variety + boolean offer(E e, double priority); + + // java.util.Collection + long size(); + + // maybe switch to iterator? 
+ void visit(StringVisitor visitor); + + void visitDequeue(StringVisitor visitor); + + boolean removeFromDequeue(E e); +} diff --git a/src/main/java/build/buildfarm/common/ScanCount.java b/src/main/java/build/buildfarm/common/ScanCount.java index 0f4b8392e1..420a656875 100644 --- a/src/main/java/build/buildfarm/common/ScanCount.java +++ b/src/main/java/build/buildfarm/common/ScanCount.java @@ -16,10 +16,10 @@ import com.google.common.collect.Sets; import java.util.Set; -import redis.clients.jedis.Jedis; import redis.clients.jedis.JedisCluster; -import redis.clients.jedis.ScanParams; -import redis.clients.jedis.ScanResult; +import redis.clients.jedis.UnifiedJedis; +import redis.clients.jedis.params.ScanParams; +import redis.clients.jedis.resps.ScanResult; /** * @class ScanCount @@ -39,21 +39,26 @@ public class ScanCount { * @return Total number of query results. * @note Suggested return identifier: count. */ - public static int get(JedisCluster cluster, String query, int scanCount) { + public static int get(UnifiedJedis jedis, String query, int scanCount) { Set keys = Sets.newHashSet(); - // JedisCluster only supports SCAN commands with MATCH patterns containing hash-tags. - // This prevents us from using the cluster's SCAN to traverse all existing keys. - // That's why we choose to scan each of the jedisNode's individually. - cluster - .getClusterNodes() - .values() - .forEach( - pool -> { - try (Jedis node = pool.getResource()) { - addKeys(node, keys, query, scanCount); - } - }); + if (jedis instanceof JedisCluster) { + JedisCluster cluster = (JedisCluster) jedis; + // JedisCluster only supports SCAN commands with MATCH patterns containing hash-tags. + // This prevents us from using the cluster's SCAN to traverse all existing keys. + // That's why we choose to scan each of the jedisNode's individually. + cluster + .getClusterNodes() + .values() + .forEach( + pool -> { + try (UnifiedJedis node = new UnifiedJedis(pool.getResource())) { + addKeys(node, keys, query, scanCount); + } + }); + } else { + addKeys(jedis, keys, query, scanCount); + } return keys.size(); } @@ -67,7 +72,7 @@ public static int get(JedisCluster cluster, String query, int scanCount) { * @param scanCount The count per scan. */ @SuppressWarnings({"unchecked", "rawtypes"}) - private static void addKeys(Jedis node, Set keys, String query, int scanCount) { + private static void addKeys(UnifiedJedis node, Set keys, String query, int scanCount) { // construct query ScanParams params = new ScanParams(); params.match(query); diff --git a/src/main/java/build/buildfarm/common/WorkerIndexer.java b/src/main/java/build/buildfarm/common/WorkerIndexer.java index d366bf95cc..0531b63ae6 100644 --- a/src/main/java/build/buildfarm/common/WorkerIndexer.java +++ b/src/main/java/build/buildfarm/common/WorkerIndexer.java @@ -19,10 +19,10 @@ import java.util.Set; import java.util.stream.Collectors; import lombok.extern.java.Log; -import redis.clients.jedis.Jedis; import redis.clients.jedis.JedisCluster; -import redis.clients.jedis.ScanParams; -import redis.clients.jedis.ScanResult; +import redis.clients.jedis.UnifiedJedis; +import redis.clients.jedis.params.ScanParams; +import redis.clients.jedis.resps.ScanResult; /** * @class WorkerIndexer @@ -55,21 +55,26 @@ public class WorkerIndexer { * @note Suggested return identifier: indexResults. 
*/ public static CasIndexResults removeWorkerIndexesFromCas( - JedisCluster cluster, CasIndexSettings settings) { + UnifiedJedis jedis, CasIndexSettings settings) { CasIndexResults results = new CasIndexResults(); - // JedisCluster only supports SCAN commands with MATCH patterns containing hash-tags. - // This prevents us from using the cluster's SCAN to traverse all of the CAS. - // That's why we choose to scan each of the jedisNode's individually. - cluster - .getClusterNodes() - .values() - .forEach( - pool -> { - try (Jedis node = pool.getResource()) { - reindexNode(cluster, node, settings, results); - } - }); + if (jedis instanceof JedisCluster) { + JedisCluster cluster = (JedisCluster) jedis; + // JedisCluster only supports SCAN commands with MATCH patterns containing hash-tags. + // This prevents us from using the cluster's SCAN to traverse all of the CAS. + // That's why we choose to scan each of the jedisNode's individually. + cluster + .getClusterNodes() + .values() + .forEach( + pool -> { + try (UnifiedJedis node = new UnifiedJedis(pool.getResource())) { + reindexNode(cluster, node, settings, results); + } + }); + } else { + reindexNode(jedis, jedis, settings, results); + } return results; } @@ -83,7 +88,7 @@ public static CasIndexResults removeWorkerIndexesFromCas( */ @SuppressWarnings({"unchecked", "rawtypes"}) private static void reindexNode( - JedisCluster cluster, Jedis node, CasIndexSettings settings, CasIndexResults results) { + UnifiedJedis cluster, UnifiedJedis node, CasIndexSettings settings, CasIndexResults results) { Long totalKeys = 0L; Long removedKeys = 0L; Long removedHosts = 0L; @@ -91,7 +96,7 @@ private static void reindexNode( log.info( String.format( "Initializing CAS Indexer for Node %s with %d active workers.", - node.getClient().getHost(), activeWorkers.size())); + node.toString(), activeWorkers.size())); // iterate over all CAS entries via scanning // and remove worker from the CAS keys. @@ -128,7 +133,7 @@ private static void reindexNode( results.totalKeys += totalKeys; results.removedKeys += removedKeys; results.removedHosts += removedHosts; - indexerHostsRemovedGauge.labels(node.getClient().getHost()).set(removedHosts); - indexerKeysRemovedGauge.labels(node.getClient().getHost()).set(removedKeys); + indexerHostsRemovedGauge.labels(node.toString()).set(removedHosts); + indexerKeysRemovedGauge.labels(node.toString()).set(removedKeys); } } diff --git a/src/main/java/build/buildfarm/common/Write.java b/src/main/java/build/buildfarm/common/Write.java index ff4f5ccac9..e7f90d0448 100644 --- a/src/main/java/build/buildfarm/common/Write.java +++ b/src/main/java/build/buildfarm/common/Write.java @@ -66,6 +66,7 @@ public FeedbackOutputStream getOutput( /** Discards the specified byte. */ @Override public void write(int b) {} + /** Discards the specified byte array. */ @Override public void write(byte[] b) { diff --git a/src/main/java/build/buildfarm/common/blake3/Blake3HashFunction.java b/src/main/java/build/buildfarm/common/blake3/Blake3HashFunction.java index 7390504b79..b311a6fee1 100644 --- a/src/main/java/build/buildfarm/common/blake3/Blake3HashFunction.java +++ b/src/main/java/build/buildfarm/common/blake3/Blake3HashFunction.java @@ -20,12 +20,10 @@ import com.google.common.hash.HashCode; import com.google.common.hash.HashFunction; import com.google.common.hash.Hasher; -import com.google.errorprone.annotations.Immutable; import java.nio.ByteBuffer; import java.nio.charset.Charset; /** A {@link HashFunction} for BLAKE3. 
*/ -@Immutable public final class Blake3HashFunction implements HashFunction { @Override public int bits() { diff --git a/src/main/java/build/buildfarm/common/config/Admin.java b/src/main/java/build/buildfarm/common/config/Admin.java index 07deb4ce70..f4f8168225 100644 --- a/src/main/java/build/buildfarm/common/config/Admin.java +++ b/src/main/java/build/buildfarm/common/config/Admin.java @@ -11,5 +11,7 @@ public enum DEPLOYMENT_ENVIRONMENT { private DEPLOYMENT_ENVIRONMENT deploymentEnvironment; private String clusterEndpoint; + // This configuration is deprecated but is left here for backwards compatibility. Use + // worker:gracefulShutdownSeconds instead. private boolean enableGracefulShutdown; } diff --git a/src/main/java/build/buildfarm/common/config/BUILD b/src/main/java/build/buildfarm/common/config/BUILD index 4d9c8eb7a0..0d416b827d 100644 --- a/src/main/java/build/buildfarm/common/config/BUILD +++ b/src/main/java/build/buildfarm/common/config/BUILD @@ -8,15 +8,19 @@ java_library( deps = [ "//src/main/java/build/buildfarm/common", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", - "@googleapis//:google_longrunning_operations_java_proto", - "@googleapis//:google_rpc_code_java_proto", + "@com_google_googleapis//google/longrunning:longrunning_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", + "@maven//:com_github_oshi_oshi_core", "@maven//:com_github_pcj_google_options", + "@maven//:com_google_code_findbugs_jsr305", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", "@maven//:com_google_protobuf_protobuf_java_util", "@maven//:io_grpc_grpc_api", "@maven//:me_dinowernli_java_grpc_prometheus", "@maven//:org_projectlombok_lombok", + "@maven//:org_redisson_redisson", "@maven//:org_yaml_snakeyaml", + "@maven//:redis_clients_jedis", ], ) diff --git a/src/main/java/build/buildfarm/common/config/Backplane.java b/src/main/java/build/buildfarm/common/config/Backplane.java index d8e12de592..0519a35916 100644 --- a/src/main/java/build/buildfarm/common/config/Backplane.java +++ b/src/main/java/build/buildfarm/common/config/Backplane.java @@ -1,7 +1,16 @@ package build.buildfarm.common.config; import com.google.common.base.Strings; +import java.net.URI; +import java.util.ArrayList; +import java.util.List; +import javax.annotation.Nullable; +import lombok.AccessLevel; import lombok.Data; +import lombok.Getter; +import lombok.ToString; +import oshi.util.FileUtil; +import redis.clients.jedis.util.JedisURIHelper; @Data public class Backplane { @@ -32,12 +41,20 @@ public enum BACKPLANE_TYPE { private String operationChannelPrefix = "OperationChannel"; private String casPrefix = "ContentAddressableStorage"; private int casExpire = 604800; // 1 Week - private boolean subscribeToBackplane = true; - private boolean runFailsafeOperation = true; + private int maxInvocationIdTimeout = 604800; + + @Getter(AccessLevel.NONE) + private boolean subscribeToBackplane = true; // deprecated + + @Getter(AccessLevel.NONE) + private boolean runFailsafeOperation = true; // deprecated + private int maxQueueDepth = 100000; private int maxPreQueueDepth = 1000000; private boolean priorityQueue = false; private Queue[] queues = {}; + private String redisCredentialFile; + @ToString.Exclude // Do not log the password on start-up. 
private String redisPassword; private int timeout = 10000; private String[] redisNodes = {}; @@ -47,13 +64,32 @@ public enum BACKPLANE_TYPE { private boolean cacheCas = false; private long priorityPollIntervalMillis = 100; - public String getRedisUri() { - // use environment override (useful for containerized deployment) - if (!Strings.isNullOrEmpty(System.getenv("REDIS_URI"))) { - return System.getenv("REDIS_URI"); + // These limited resources are shared across all workers. + // An example would be a limited number of seats to a license server. + private List resources = new ArrayList<>(); + + /** + * Look in several prioritized ways to get a Redis password: + * + *
+ *   1. the password in the Redis URI (wherever that came from)
+ *   2. the `redisPassword` value from the config YAML
+ *   3. the contents of the `redisCredentialFile`
+ * + * @return The redis password, or null if unset. + */ + public @Nullable String getRedisPassword() { + URI redisProperUri = URI.create(getRedisUri()); + if (!Strings.isNullOrEmpty(JedisURIHelper.getPassword(redisProperUri))) { + return JedisURIHelper.getPassword(redisProperUri); + } + + if (!Strings.isNullOrEmpty(redisCredentialFile)) { + // Get the password from the config file. + return FileUtil.getStringFromFile(redisCredentialFile); } - // use configured value - return redisUri; + return Strings.emptyToNull(redisPassword); } } diff --git a/src/main/java/build/buildfarm/common/config/BuildfarmConfigs.java b/src/main/java/build/buildfarm/common/config/BuildfarmConfigs.java index 081722cbac..3f9b5461c4 100644 --- a/src/main/java/build/buildfarm/common/config/BuildfarmConfigs.java +++ b/src/main/java/build/buildfarm/common/config/BuildfarmConfigs.java @@ -36,10 +36,10 @@ public final class BuildfarmConfigs { private long maximumActionTimeout = 3600; private long maxEntrySizeBytes = 2147483648L; // 2 * 1024 * 1024 * 1024 private int prometheusPort = 9090; + private boolean allowSymlinkTargetAbsolute = false; private Server server = new Server(); private Backplane backplane = new Backplane(); private Worker worker = new Worker(); - private WebUI ui = new WebUI(); private ExecutionWrappers executionWrappers = new ExecutionWrappers(); private BuildfarmConfigs() {} @@ -68,17 +68,23 @@ public static BuildfarmConfigs loadServerConfigs(String[] args) throws Configura ServerOptions options = parser.getOptions(ServerOptions.class); try { buildfarmConfigs = loadConfigs(getConfigurationPath(parser)); - adjustServerConfigs(buildfarmConfigs); } catch (IOException e) { log.severe("Could not parse yml configuration file." + e); throw new RuntimeException(e); } - if (!options.publicName.isEmpty()) { + if (!Strings.isNullOrEmpty(options.publicName)) { buildfarmConfigs.getServer().setPublicName(options.publicName); } if (options.port > 0) { buildfarmConfigs.getServer().setPort(options.port); } + if (options.prometheusPort >= 0) { + buildfarmConfigs.setPrometheusPort(options.prometheusPort); + } + if (!Strings.isNullOrEmpty(options.redisUri)) { + buildfarmConfigs.getBackplane().setRedisUri(options.redisUri); + } + adjustServerConfigs(buildfarmConfigs); return buildfarmConfigs; } @@ -87,7 +93,6 @@ public static BuildfarmConfigs loadWorkerConfigs(String[] args) throws Configura ShardWorkerOptions options = parser.getOptions(ShardWorkerOptions.class); try { buildfarmConfigs = loadConfigs(getConfigurationPath(parser)); - adjustWorkerConfigs(buildfarmConfigs); } catch (IOException e) { log.severe("Could not parse yml configuration file." 
+ e); throw new RuntimeException(e); @@ -95,6 +100,19 @@ public static BuildfarmConfigs loadWorkerConfigs(String[] args) throws Configura if (!Strings.isNullOrEmpty(options.publicName)) { buildfarmConfigs.getWorker().setPublicName(options.publicName); } + if (options.port >= 0) { + buildfarmConfigs.getWorker().setPort(options.port); + } + if (options.prometheusPort >= 0) { + buildfarmConfigs.setPrometheusPort(options.prometheusPort); + } + if (!Strings.isNullOrEmpty(options.redisUri)) { + buildfarmConfigs.getBackplane().setRedisUri(options.redisUri); + } + if (!Strings.isNullOrEmpty(options.root)) { + buildfarmConfigs.getWorker().setRoot(options.root); + } + adjustWorkerConfigs(buildfarmConfigs); return buildfarmConfigs; } @@ -293,7 +311,8 @@ private static void checkExecutionWrapperAvailability(BuildfarmConfigs configs) if (Files.notExists(Paths.get(tool))) { String message = String.format( - "the execution wrapper %s is missing and therefore the following features will not be available: %s", + "the execution wrapper %s is missing and therefore the following" + + " features will not be available: %s", tool, String.join(", ", features)); log.warning(message); } diff --git a/src/main/java/build/buildfarm/common/config/BuildfarmOptions.java b/src/main/java/build/buildfarm/common/config/BuildfarmOptions.java new file mode 100644 index 0000000000..9fc3f1ed17 --- /dev/null +++ b/src/main/java/build/buildfarm/common/config/BuildfarmOptions.java @@ -0,0 +1,43 @@ +// Copyright 2023 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package build.buildfarm.common.config; + +import com.google.devtools.common.options.Option; +import com.google.devtools.common.options.OptionsBase; + +/** Command-line options definition for Worker. */ +public class BuildfarmOptions extends OptionsBase { + @Option(name = "help", abbrev = 'h', help = "Prints usage info.", defaultValue = "true") + public boolean help; + + @Option( + name = "prometheus_port", + help = "Port for the prometheus service. '0' will disable prometheus hosting", + defaultValue = "-1") + public int prometheusPort; + + @Option( + name = "redis_uri", + help = "URI for Redis connection. 
Use 'redis://' or 'rediss://' for the scheme", + defaultValue = "") + public String redisUri; + + @Option( + name = "port", + abbrev = 'p', + help = "Port for the buildfarm service.", + defaultValue = "-1") + public int port; +} diff --git a/src/main/java/build/buildfarm/common/config/Cas.java b/src/main/java/build/buildfarm/common/config/Cas.java index 5b16fb2168..9c671d4958 100644 --- a/src/main/java/build/buildfarm/common/config/Cas.java +++ b/src/main/java/build/buildfarm/common/config/Cas.java @@ -3,7 +3,9 @@ import com.google.common.base.Strings; import java.nio.file.Path; import javax.naming.ConfigurationException; +import lombok.AccessLevel; import lombok.Data; +import lombok.Getter; @Data public class Cas { @@ -30,8 +32,8 @@ public enum TYPE { private String target; private boolean readonly = false; - // Metrics - private boolean publishTtlMetric = false; + @Getter(AccessLevel.NONE) + private boolean publishTtlMetric = false; // deprecated public Path getValidPath(Path root) throws ConfigurationException { if (Strings.isNullOrEmpty(path)) { diff --git a/src/main/java/build/buildfarm/common/config/DequeueMatchSettings.java b/src/main/java/build/buildfarm/common/config/DequeueMatchSettings.java index 20ccbf85b4..29655e20de 100644 --- a/src/main/java/build/buildfarm/common/config/DequeueMatchSettings.java +++ b/src/main/java/build/buildfarm/common/config/DequeueMatchSettings.java @@ -3,11 +3,15 @@ import build.bazel.remote.execution.v2.Platform; import java.util.ArrayList; import java.util.List; +import lombok.AccessLevel; import lombok.Data; +import lombok.Getter; @Data public class DequeueMatchSettings { - private boolean acceptEverything = true; + @Getter(AccessLevel.NONE) + private boolean acceptEverything; // deprecated + private boolean allowUnmatched = false; private List properties = new ArrayList(); diff --git a/src/main/java/build/buildfarm/common/config/GrpcMetrics.java b/src/main/java/build/buildfarm/common/config/GrpcMetrics.java index cdd8f05f30..a6e8653500 100644 --- a/src/main/java/build/buildfarm/common/config/GrpcMetrics.java +++ b/src/main/java/build/buildfarm/common/config/GrpcMetrics.java @@ -1,6 +1,7 @@ package build.buildfarm.common.config; import io.grpc.ServerBuilder; +import java.util.List; import lombok.Data; import me.dinowernli.grpc.prometheus.Configuration; import me.dinowernli.grpc.prometheus.MonitoringServerInterceptor; @@ -9,6 +10,8 @@ public class GrpcMetrics { private boolean enabled = false; private boolean provideLatencyHistograms = false; + private double[] latencyBuckets; + private List labelsToReport; public static void handleGrpcMetricIntercepts( ServerBuilder serverBuilder, GrpcMetrics grpcMetrics) { @@ -21,7 +24,17 @@ public static void handleGrpcMetricIntercepts( // Enable latency buckets. if (grpcMetrics.isProvideLatencyHistograms()) { - grpcConfig = grpcConfig.allMetrics(); + grpcConfig = Configuration.allMetrics(); + } + + // provide custom latency buckets + if (grpcMetrics.getLatencyBuckets() != null) { + grpcConfig = grpcConfig.withLatencyBuckets(grpcMetrics.getLatencyBuckets()); + } + + // report custom metric labels + if (grpcMetrics.getLabelsToReport() != null) { + grpcConfig = grpcConfig.withLabelHeaders(grpcMetrics.getLabelsToReport()); } // Apply config to create an interceptor and apply it to the GRPC server. 
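[Editor's note, not part of the patch] The two new GrpcMetrics fields above feed directly into the me.dinowernli grpc-prometheus Configuration via withLatencyBuckets and withLabelHeaders, as the hunk shows. Below is a minimal sketch of how they compose, assuming the setters generated by Lombok's @Data on GrpcMetrics; the class name, the bucket boundaries, and the "build-id" header label are hypothetical, and in a real deployment these values would come from the grpcMetrics section of the YAML config rather than from setters.

    import build.buildfarm.common.config.GrpcMetrics;
    import com.google.common.collect.ImmutableList;
    import io.grpc.ServerBuilder;

    class GrpcMetricsWiringSketch {
      // Applies the new metrics knobs by hand; in practice these fields are
      // populated from the parsed YAML config, not from setters.
      static ServerBuilder<?> instrument(ServerBuilder<?> serverBuilder) {
        GrpcMetrics grpcMetrics = new GrpcMetrics();
        grpcMetrics.setEnabled(true);
        grpcMetrics.setProvideLatencyHistograms(true);
        // Custom histogram boundaries (illustrative values only).
        grpcMetrics.setLatencyBuckets(new double[] {0.005, 0.05, 0.5, 5, 50});
        // Report the value of this request header as an extra metric label
        // ("build-id" is a hypothetical header name).
        grpcMetrics.setLabelsToReport(ImmutableList.of("build-id"));
        // Builds a MonitoringServerInterceptor from the config and registers
        // it on the server, per handleGrpcMetricIntercepts above.
        GrpcMetrics.handleGrpcMetricIntercepts(serverBuilder, grpcMetrics);
        return serverBuilder;
      }
    }

Latency buckets follow the Prometheus convention of ascending boundaries in seconds; labelsToReport names request headers whose values are attached to the emitted metrics.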
diff --git a/src/main/java/build/buildfarm/common/config/LimitedResource.java b/src/main/java/build/buildfarm/common/config/LimitedResource.java new file mode 100644 index 0000000000..f3b09ff621 --- /dev/null +++ b/src/main/java/build/buildfarm/common/config/LimitedResource.java @@ -0,0 +1,43 @@ +// Copyright 2023 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package build.buildfarm.common.config; + +import lombok.Data; + +/** + * @class Limited Resource + * @brief A fixed amount of a specific resource. + * @details We define a limited resource as a counting semaphore whose configuration contains a name + * and a count representing a physical or logical group of units obtained by executors as a + * precondition to fulfill a long running operation. These units are released upon the + * operation's completion. The resource is requested by the action's platform properties. + */ +@Data +public class LimitedResource { + /** + * @field name + * @brief The name of the resource. + * @details This should correspond to the platform property's key name: + * resources:: + */ + public String name; + + /** + * @field amount + * @brief The total amount of the resource that's available for use during execution. + * @details As a counting semaphore, this amount becomes the limit. + */ + public int amount = 1; +} diff --git a/src/main/java/build/buildfarm/common/config/Metrics.java b/src/main/java/build/buildfarm/common/config/Metrics.java index 73a9113d7a..28e065ddbf 100644 --- a/src/main/java/build/buildfarm/common/config/Metrics.java +++ b/src/main/java/build/buildfarm/common/config/Metrics.java @@ -1,6 +1,8 @@ package build.buildfarm.common.config; +import lombok.AccessLevel; import lombok.Data; +import lombok.Getter; @Data public class Metrics { @@ -19,7 +21,9 @@ public enum LOG_LEVEL { FINEST, } - private PUBLISHER publisher = PUBLISHER.LOG; + @Getter(AccessLevel.NONE) + private PUBLISHER publisher = PUBLISHER.LOG; // deprecated + private LOG_LEVEL logLevel = LOG_LEVEL.FINEST; private String topic; private int topicMaxConnections; diff --git a/src/main/java/build/buildfarm/common/config/SandboxSettings.java b/src/main/java/build/buildfarm/common/config/SandboxSettings.java index 030377bf0c..db198b7c8e 100644 --- a/src/main/java/build/buildfarm/common/config/SandboxSettings.java +++ b/src/main/java/build/buildfarm/common/config/SandboxSettings.java @@ -14,6 +14,8 @@ package build.buildfarm.common.config; +import java.util.ArrayList; +import java.util.List; import lombok.Data; /** @@ -26,11 +28,46 @@ @Data public class SandboxSettings { /** - * @field alwaysUse + * @field alwaysUseSandbox * @brief Whether or not to always use the sandbox when running actions. * @details It may be preferred to enforce sandbox usage than rely on client selection. */ - public boolean alwaysUse = false; + public boolean alwaysUseSandbox = false; + + /** + * @field alwaysUseAsNobody + * @brief Whether or not to always use the as-nobody wrapper when running actions. 
+ * @details It may be preferred to enforce this wrapper instead of relying on client selection. + */ + public boolean alwaysUseAsNobody = false; + + /** + * @field alwaysUseCgroups + * @brief Whether or not to use cgroups when sandboxing actions. + * @details It may be preferred to enforce cgroup usage. + */ + public boolean alwaysUseCgroups = true; + + /** + * @field alwaysUseTmpFs + * @brief Whether or not to always use tmpfs when using the sandbox. + * @details It may be preferred to enforce sandbox usage than rely on client selection. + */ + public boolean alwaysUseTmpFs = false; + + /** + * @field additionalWritePaths + * @brief Additional paths the sandbox is allowed to write to. + * @details Suggestions may include: /tmp, /dev/shm + */ + public List additionalWritePaths = new ArrayList(); + + /** + * @field tmpFsPaths + * @brief Additional paths the sandbox uses for tmpfs + * @details Suggestions may include: /tmp + */ + public List tmpFsPaths = new ArrayList(); /** * @field selectForBlockNetwork diff --git a/src/main/java/build/buildfarm/common/config/Server.java b/src/main/java/build/buildfarm/common/config/Server.java index e169c2575b..0487154c2f 100644 --- a/src/main/java/build/buildfarm/common/config/Server.java +++ b/src/main/java/build/buildfarm/common/config/Server.java @@ -24,6 +24,7 @@ public enum INSTANCE_TYPE { private String sslPrivateKeyPath = null; private boolean runDispatchedMonitor = true; private int dispatchedMonitorIntervalSeconds = 1; + private boolean runFailsafeOperation = true; private boolean runOperationQueuer = true; private boolean ensureOutputsPresent = false; private int maxRequeueAttempts = 5; @@ -41,6 +42,7 @@ public enum INSTANCE_TYPE { private int maxInboundMetadataSize = 0; private ServerCacheConfigs caches = new ServerCacheConfigs(); private boolean findMissingBlobsViaBackplane = false; + private int gracefulShutdownSeconds = 0; public String getSession() { return String.format("buildfarm-server-%s-%s", getPublicName(), sessionGuid); diff --git a/src/main/java/build/buildfarm/common/config/ServerOptions.java b/src/main/java/build/buildfarm/common/config/ServerOptions.java index 5c1e00a0d5..b151c24c7b 100644 --- a/src/main/java/build/buildfarm/common/config/ServerOptions.java +++ b/src/main/java/build/buildfarm/common/config/ServerOptions.java @@ -15,16 +15,9 @@ package build.buildfarm.common.config; import com.google.devtools.common.options.Option; -import com.google.devtools.common.options.OptionsBase; /** Command-line options definition for example server. */ -public class ServerOptions extends OptionsBase { - @Option(name = "help", abbrev = 'h', help = "Prints usage info.", defaultValue = "true") - public boolean help; - - @Option(name = "port", abbrev = 'p', help = "Port to use.", defaultValue = "-1") - public int port; - +public class ServerOptions extends BuildfarmOptions { @Option(name = "public_name", abbrev = 'n', help = "Name of this server.", defaultValue = "") public String publicName; } diff --git a/src/main/java/build/buildfarm/common/config/ShardWorkerOptions.java b/src/main/java/build/buildfarm/common/config/ShardWorkerOptions.java index 7671ae0a4f..c116d31673 100644 --- a/src/main/java/build/buildfarm/common/config/ShardWorkerOptions.java +++ b/src/main/java/build/buildfarm/common/config/ShardWorkerOptions.java @@ -15,13 +15,9 @@ package build.buildfarm.common.config; import com.google.devtools.common.options.Option; -import com.google.devtools.common.options.OptionsBase; /** Command-line options definition for Worker. 
*/ -public class ShardWorkerOptions extends OptionsBase { - @Option(name = "help", abbrev = 'h', help = "Prints usage info.", defaultValue = "true") - public boolean help; - +public class ShardWorkerOptions extends BuildfarmOptions { @Option( name = "root", help = "Root base directory for all work being performed.", diff --git a/src/main/java/build/buildfarm/common/config/WebUI.java b/src/main/java/build/buildfarm/common/config/WebUI.java deleted file mode 100644 index 443d210691..0000000000 --- a/src/main/java/build/buildfarm/common/config/WebUI.java +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2023 The Bazel Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package build.buildfarm.common.config; - -import lombok.Data; - -/** - * @class WebUI - * @brief Settings for buildfarm's web UI. - * @details Buildfarm provides a web frontend for developers to introspect builds. - */ -@Data -public class WebUI { - /** - * @field enable - * @brief Whether to enable the web frontend. - * @details When disabled there will be no ports opened or routes available. - */ - public boolean enable = false; - - /** - * @field port - * @brief HTTP port for the web frontend. - * @details 8080 is useful for local testing since port 80 requires sudo. We choose the following - * default since many ports are blocked in upstream CI. - */ - public String port = "8982"; -} diff --git a/src/main/java/build/buildfarm/common/config/Worker.java b/src/main/java/build/buildfarm/common/config/Worker.java index 294b70dc2f..1a95d94956 100644 --- a/src/main/java/build/buildfarm/common/config/Worker.java +++ b/src/main/java/build/buildfarm/common/config/Worker.java @@ -5,6 +5,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.util.ArrayList; import java.util.Arrays; import java.util.List; import javax.naming.ConfigurationException; @@ -28,15 +29,24 @@ public class Worker { private int inputFetchStageWidth = 0; private int inputFetchDeadline = 60; private boolean linkInputDirectories = true; - private List realInputDirectories = Arrays.asList("external"); + private List linkedInputDirectories = Arrays.asList("(?!external)[^/]+"); private String execOwner; private int defaultMaxCores = 0; private boolean limitGlobalExecution = false; private boolean onlyMulticoreTests = false; private boolean allowBringYourOwnContainer = false; private boolean errorOperationRemainingResources = false; + private int gracefulShutdownSeconds = 0; private ExecutionPolicy[] executionPolicies = {}; private SandboxSettings sandboxSettings = new SandboxSettings(); + private boolean createSymlinkOutputs = false; + + // These limited resources are only for the individual worker. + // An example would be hardware resources such as GPUs. + // If you want GPU actions to run exclusively, define a single GPU resource. 
+ private List resources = new ArrayList<>(); + + private boolean errorOperationOutputSizeExceeded = false; public ExecutionPolicy[] getExecutionPolicies() { if (executionPolicies != null) { diff --git a/src/main/java/build/buildfarm/common/grpc/BUILD b/src/main/java/build/buildfarm/common/grpc/BUILD index 7bff874ad8..14a866e052 100644 --- a/src/main/java/build/buildfarm/common/grpc/BUILD +++ b/src/main/java/build/buildfarm/common/grpc/BUILD @@ -5,17 +5,18 @@ java_library( visibility = ["//visibility:public"], deps = [ "//src/main/java/build/buildfarm/common", - "@googleapis//:google_bytestream_bytestream_java_grpc", - "@googleapis//:google_bytestream_bytestream_java_proto", + "@com_google_googleapis//google/bytestream:bytestream_java_grpc", + "@com_google_googleapis//google/bytestream:bytestream_java_proto", "@maven//:com_google_code_findbugs_jsr305", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_context", "@maven//:io_grpc_grpc_core", + "@maven//:io_grpc_grpc_netty", "@maven//:io_grpc_grpc_protobuf", "@maven//:io_grpc_grpc_stub", "@maven//:org_projectlombok_lombok", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) diff --git a/src/main/java/build/buildfarm/common/grpc/Channels.java b/src/main/java/build/buildfarm/common/grpc/Channels.java new file mode 100644 index 0000000000..0531218f23 --- /dev/null +++ b/src/main/java/build/buildfarm/common/grpc/Channels.java @@ -0,0 +1,40 @@ +// Copyright 2023 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package build.buildfarm.common.grpc; + +import io.grpc.ManagedChannel; +import io.grpc.netty.NegotiationType; +import io.grpc.netty.NettyChannelBuilder; + +public final class Channels { + private static final String GRPCS_URL_PREFIX = "grpcs://"; + private static final String GRPC_URL_PREFIX = "grpc://"; + + private Channels() {} + + public static ManagedChannel createChannel(String target) { + NegotiationType negotiationType = NegotiationType.PLAINTEXT; + if (target.startsWith(GRPCS_URL_PREFIX)) { + target = target.substring(GRPCS_URL_PREFIX.length()); + negotiationType = NegotiationType.TLS; + } else if (target.startsWith(GRPC_URL_PREFIX)) { + target = target.substring(GRPC_URL_PREFIX.length()); + negotiationType = NegotiationType.PLAINTEXT; + } + NettyChannelBuilder builder = + NettyChannelBuilder.forTarget(target).negotiationType(negotiationType); + return builder.build(); + } +} diff --git a/src/main/java/build/buildfarm/common/grpc/Retrier.java b/src/main/java/build/buildfarm/common/grpc/Retrier.java index 6fd7802e79..3e81bbf528 100644 --- a/src/main/java/build/buildfarm/common/grpc/Retrier.java +++ b/src/main/java/build/buildfarm/common/grpc/Retrier.java @@ -100,10 +100,10 @@ public int getRetryAttempts() { static Supplier sequential(int maxAttempts) { return exponential( - /* initial=*/ Duration.ZERO, - /* max=*/ Duration.ZERO, - /* multiplier=*/ 1.1, - /* jitter=*/ 0.0, + /* initial= */ Duration.ZERO, + /* max= */ Duration.ZERO, + /* multiplier= */ 1.1, + /* jitter= */ 0.0, maxAttempts); } @@ -203,7 +203,7 @@ public int getRetryAttempts() { @SuppressWarnings("Guava") public Retrier(Supplier backoffSupplier, Predicate isRetriable) { - this(backoffSupplier, isRetriable, /* retryScheduler=*/ null); + this(backoffSupplier, isRetriable, /* retryScheduler= */ null); } @SuppressWarnings("Guava") diff --git a/src/main/java/build/buildfarm/common/grpc/StubWriteOutputStream.java b/src/main/java/build/buildfarm/common/grpc/StubWriteOutputStream.java index 26b67f5c6c..0b2b7199f0 100644 --- a/src/main/java/build/buildfarm/common/grpc/StubWriteOutputStream.java +++ b/src/main/java/build/buildfarm/common/grpc/StubWriteOutputStream.java @@ -15,6 +15,7 @@ package build.buildfarm.common.grpc; import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Preconditions.checkState; import static com.google.common.util.concurrent.Futures.immediateFuture; import static java.lang.String.format; import static java.util.logging.Level.WARNING; @@ -131,21 +132,25 @@ public StubWriteOutputStream( @Override public void close() throws IOException { + StreamObserver finishedWriteObserver; + boolean cancelled = false; if (!checkComplete()) { boolean finishWrite = expectedSize == UNLIMITED_EXPECTED_SIZE; if (finishWrite || offset != 0) { initiateWrite(); flushSome(finishWrite); } - synchronized (this) { - if (writeObserver != null) { - if (finishWrite || getCommittedSize() + offset == expectedSize) { - writeObserver.onCompleted(); - } else { - writeObserver.onError(Status.CANCELLED.asException()); - } - writeObserver = null; - } + cancelled = !finishWrite && getCommittedSize() + offset != expectedSize; + } + synchronized (this) { + finishedWriteObserver = writeObserver; + writeObserver = null; + } + if (finishedWriteObserver != null) { + if (cancelled) { + finishedWriteObserver.onError(Status.CANCELLED.asException()); + } else { + finishedWriteObserver.onCompleted(); } } } @@ -160,12 +165,18 @@ private void flushSome(boolean finishWrite) { 
request.setResourceName(resourceName); } synchronized (this) { - writeObserver.onNext(request.build()); + // writeObserver can be nulled by a completion race + // expect that we are completed in this case + if (writeObserver != null) { + writeObserver.onNext(request.build()); + wasReset = false; + writtenBytes += offset; + offset = 0; + sentResourceName = true; + } else { + checkState(writeFuture.isDone(), "writeObserver nulled without completion"); + } } - wasReset = false; - writtenBytes += offset; - offset = 0; - sentResourceName = true; } @Override @@ -227,6 +238,16 @@ public void onNext(WriteResponse response) { @Override public void onError(Throwable t) { + if (Status.fromThrowable(t).getCode() != Code.CANCELLED) { + log.log( + WARNING, + format( + "%s: write(%s) on worker %s after %d bytes of content", + Status.fromThrowable(t).getCode().name(), + resourceName, + bsStub.get().getChannel().authority(), + writtenBytes)); + } writeFuture.setException(exceptionTranslator.apply(t)); } @@ -326,11 +347,7 @@ public FeedbackOutputStream getOutput( this.deadlineAfter = deadlineAfter; this.deadlineAfterUnits = deadlineAfterUnits; this.onReadyHandler = onReadyHandler; - synchronized (this) { - if (writeObserver == null) { - initiateWrite(); - } - } + initiateWrite(); return this; } diff --git a/src/main/java/build/buildfarm/common/io/Directories.java b/src/main/java/build/buildfarm/common/io/Directories.java index 9e7b272d70..b861664e25 100644 --- a/src/main/java/build/buildfarm/common/io/Directories.java +++ b/src/main/java/build/buildfarm/common/io/Directories.java @@ -48,8 +48,8 @@ public class Directories { private Directories() {} - private static void makeWritable(Path dir, boolean writable) throws IOException { - FileStore fileStore = Files.getFileStore(dir); + private static void makeWritable(Path dir, boolean writable, FileStore fileStore) + throws IOException { if (fileStore.supportsFileAttributeView("posix")) { if (writable) { Files.setPosixFilePermissions(dir, writablePerms); @@ -82,14 +82,15 @@ private static void makeWritable(Path dir, boolean writable) throws IOException } } - public static ListenableFuture remove(Path path, ExecutorService service) { + public static ListenableFuture remove( + Path path, FileStore fileStore, ExecutorService service) { String suffix = UUID.randomUUID().toString(); Path filename = path.getFileName(); String tmpFilename = filename + ".tmp." 
+ suffix; Path tmpPath = path.resolveSibling(tmpFilename); try { // MacOS does not permit renames unless the directory is permissioned appropriately - makeWritable(path, true); + makeWritable(path, true, fileStore); // rename must be synchronous to call Files.move(path, tmpPath); } catch (IOException e) { @@ -99,7 +100,7 @@ public static ListenableFuture remove(Path path, ExecutorService service) .submit( () -> { try { - remove(tmpPath); + remove(tmpPath, fileStore); } catch (IOException e) { log.log(Level.SEVERE, "error removing directory " + tmpPath, e); } @@ -107,14 +108,14 @@ public static ListenableFuture remove(Path path, ExecutorService service) null); } - public static void remove(Path directory) throws IOException { + public static void remove(Path directory, FileStore fileStore) throws IOException { Files.walkFileTree( directory, new SimpleFileVisitor() { @Override public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException { - makeWritable(dir, true); + makeWritable(dir, true, fileStore); return FileVisitResult.CONTINUE; } @@ -158,12 +159,12 @@ public FileVisitResult postVisitDirectory(Path dir, IOException e) throws IOExce }); } - public static void disableAllWriteAccess(Path directory) throws IOException { - forAllPostDirs(directory, dir -> makeWritable(dir, false)); + public static void disableAllWriteAccess(Path directory, FileStore fileStore) throws IOException { + forAllPostDirs(directory, dir -> makeWritable(dir, false, fileStore)); } - public static void enableAllWriteAccess(Path directory) throws IOException { - forAllPostDirs(directory, dir -> makeWritable(dir, true)); + public static void enableAllWriteAccess(Path directory, FileStore fileStore) throws IOException { + forAllPostDirs(directory, dir -> makeWritable(dir, true, fileStore)); } public static void setAllOwner(Path directory, UserPrincipal owner) throws IOException { diff --git a/src/main/java/build/buildfarm/common/io/FFIdirent.java b/src/main/java/build/buildfarm/common/io/FFIdirent.java index d3880b2722..6c9fd7ddf5 100644 --- a/src/main/java/build/buildfarm/common/io/FFIdirent.java +++ b/src/main/java/build/buildfarm/common/io/FFIdirent.java @@ -32,5 +32,5 @@ public java.lang.String getName() { public final Signed64 d_off = new Signed64(); public final Unsigned16 d_reclen = new Unsigned16(); public final Unsigned8 d_type = new Unsigned8(); - public final AsciiString d_name = new AsciiString(MAX_NAME_LEN); + public final UTF8String d_name = new UTF8String(MAX_NAME_LEN); } diff --git a/src/main/java/build/buildfarm/common/io/Utils.java b/src/main/java/build/buildfarm/common/io/Utils.java index 64a4ebcf50..3457cc3d62 100644 --- a/src/main/java/build/buildfarm/common/io/Utils.java +++ b/src/main/java/build/buildfarm/common/io/Utils.java @@ -291,7 +291,8 @@ public static FileStatus stat(final Path path, final boolean followSymlinks, Fil boolean isReadOnlyExecutable; try { attributes = Files.readAttributes(path, BasicFileAttributes.class, linkOpts(followSymlinks)); - isReadOnlyExecutable = EvenMoreFiles.isReadOnlyExecutable(path, fileStore); + isReadOnlyExecutable = + !attributes.isSymbolicLink() && EvenMoreFiles.isReadOnlyExecutable(path, fileStore); } catch (java.nio.file.FileSystemException e) { throw new NoSuchFileException(path + ERR_NO_SUCH_FILE_OR_DIR); } diff --git a/src/main/java/build/buildfarm/common/redis/BUILD b/src/main/java/build/buildfarm/common/redis/BUILD index 9efbc97947..c72a354f91 100644 --- a/src/main/java/build/buildfarm/common/redis/BUILD +++ 
b/src/main/java/build/buildfarm/common/redis/BUILD @@ -7,11 +7,12 @@ java_library( "//src/main/java/build/buildfarm/common", "//src/main/java/build/buildfarm/common/config", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", - "//third_party/jedis", + "@maven//:com_google_code_findbugs_jsr305", "@maven//:com_google_guava_guava", "@maven//:io_grpc_grpc_api", "@maven//:io_prometheus_simpleclient", "@maven//:org_projectlombok_lombok", "@maven//:org_redisson_redisson", + "@maven//:redis_clients_jedis", ], ) diff --git a/src/main/java/build/buildfarm/common/redis/BalancedRedisQueue.java b/src/main/java/build/buildfarm/common/redis/BalancedRedisQueue.java index 52f937c8ed..8ed911d6c0 100644 --- a/src/main/java/build/buildfarm/common/redis/BalancedRedisQueue.java +++ b/src/main/java/build/buildfarm/common/redis/BalancedRedisQueue.java @@ -14,13 +14,27 @@ package build.buildfarm.common.redis; +import static com.google.common.collect.Iterables.transform; + +import build.buildfarm.common.Queue; import build.buildfarm.common.StringVisitor; -import build.buildfarm.common.config.Queue; import build.buildfarm.v1test.QueueStatus; +import com.google.common.collect.ImmutableList; +import java.time.Duration; import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import javax.annotation.Nullable; +import redis.clients.jedis.Connection; +import redis.clients.jedis.Jedis; import redis.clients.jedis.JedisCluster; +import redis.clients.jedis.JedisPooled; +import redis.clients.jedis.UnifiedJedis; +import redis.clients.jedis.util.JedisClusterCRC16; /** * @class BalancedRedisQueue @@ -32,9 +46,9 @@ * the same underlying redis queues. */ public class BalancedRedisQueue { - private static final int START_TIMEOUT_SECONDS = 1; + private static final Duration START_TIMEOUT = Duration.ofSeconds(1); - private static final int MAX_TIMEOUT_SECONDS = 8; + private static final Duration MAX_TIMEOUT = Duration.ofSeconds(8); /** * @field name @@ -44,13 +58,6 @@ public class BalancedRedisQueue { */ private final String name; - /** - * @field queueType - * @brief Type of the queue. - * @details It's used for selecting between regular and priority queues - */ - private final Queue.QUEUE_TYPE queueType; - /** * @field originalHashtag * @brief The original hashtag of the name provided to the queue. @@ -76,7 +83,9 @@ public class BalancedRedisQueue { * @details Although these are multiple queues, the balanced redis queue treats them as one in its * interface. */ - private final List queues = new ArrayList<>(); + private final List queues; + + private final QueueDecorator queueDecorator; /** * @field currentPushQueue @@ -101,20 +110,8 @@ public class BalancedRedisQueue { * @param hashtags Hashtags to distribute queue data. * @note Overloaded. */ - public BalancedRedisQueue(String name, List hashtags) { - this(name, hashtags, -1, Queue.QUEUE_TYPE.standard); - } - - /** - * @brief Constructor. - * @details Construct a named redis queue with an established redis cluster. - * @param name The global name of the queue. - * @param hashtags Hashtags to distribute queue data. - * @param queueType Type of the queue in use - * @note Overloaded. 
- */ - public BalancedRedisQueue(String name, List hashtags, Queue.QUEUE_TYPE queueType) { - this(name, hashtags, -1, queueType); + public BalancedRedisQueue(String name, List hashtags, QueueDecorator queueDecorator) { + this(name, hashtags, -1, queueDecorator); } /** @@ -125,26 +122,18 @@ public BalancedRedisQueue(String name, List hashtags, Queue.QUEUE_TYPE q * @param maxQueueSize The maximum amount of elements that should be added to the queue. * @note Overloaded. */ - public BalancedRedisQueue(String name, List hashtags, int maxQueueSize) { - this(name, hashtags, maxQueueSize, Queue.QUEUE_TYPE.standard); + public BalancedRedisQueue( + String name, List hashtags, int maxQueueSize, QueueDecorator queueDecorator) { + this(name, maxQueueSize, createHashedQueues(name, hashtags), queueDecorator); } - /** - * @brief Constructor. - * @details Construct a named redis queue with an established redis cluster. - * @param name The global name of the queue. - * @param hashtags Hashtags to distribute queue data. - * @param maxQueueSize The maximum amount of elements that should be added to the queue. - * @param queueType Type of the queue in use - * @note Overloaded. - */ public BalancedRedisQueue( - String name, List hashtags, int maxQueueSize, Queue.QUEUE_TYPE queueType) { + String name, int maxQueueSize, List queues, QueueDecorator queueDecorator) { this.originalHashtag = RedisHashtags.existingHash(name); this.name = RedisHashtags.unhashedName(name); - this.queueType = queueType; this.maxQueueSize = maxQueueSize; - createHashedQueues(this.name, hashtags, this.queueType); + this.queues = queues; + this.queueDecorator = queueDecorator; } /** @@ -152,8 +141,11 @@ public BalancedRedisQueue( * @details Adds the value into one of the internal backend redis queues. * @param val The value to push onto the queue. */ - public void push(JedisCluster jedis, String val) { - queues.get(roundRobinPushIndex()).push(jedis, val); + public boolean offer(UnifiedJedis unified, String val) { + String queue = queues.get(roundRobinPushIndex()); + try (Jedis jedis = getJedisFromKey(unified, queue)) { + return queueDecorator.decorate(jedis, queue).offer(val); + } } /** @@ -161,8 +153,11 @@ public void push(JedisCluster jedis, String val) { * @details Adds the value into one of the internal backend redis queues. * @param val The value to push onto the queue. */ - public void push(JedisCluster jedis, String val, double priority) { - queues.get(roundRobinPushIndex()).push(jedis, val, priority); + public boolean offer(UnifiedJedis unified, String val, double priority) { + String queue = queues.get(roundRobinPushIndex()); + try (Jedis jedis = getJedisFromKey(unified, queue)) { + return queueDecorator.decorate(jedis, queue).offer(val, priority); + } } /** @@ -172,15 +167,52 @@ public void push(JedisCluster jedis, String val, double priority) { * @return Whether or not the value was removed. * @note Suggested return identifier: wasRemoved. 
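With the QUEUE_TYPE enum gone, the queue flavor is injected as a QueueDecorator (defined later in this diff) that binds a per-node Jedis connection to a concrete queue implementation, and offer() resolves a member queue, opens a node-local connection, decorates it, and pushes, all per call. A hedged wiring sketch; priority, hashtags, and unified are assumed caller-supplied values:

// choose the implementation once, at construction time
QueueDecorator decorator = priority ? RedisPriorityQueue::decorate : RedisQueue::decorate;
BalancedRedisQueue queue = new BalancedRedisQueue("{06S}operation_queue", hashtags, decorator);

// push through whichever member queue the round-robin selects
queue.offer(unified, "operation-json");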
*/ - public boolean removeFromDequeue(JedisCluster jedis, String val) { - for (QueueInterface queue : partialIterationQueueOrder()) { - if (queue.removeFromDequeue(jedis, val)) { - return true; + public boolean removeFromDequeue(UnifiedJedis unified, String val) { + for (String queue : partialIterationQueueOrder()) { + try (Jedis jedis = getJedisFromKey(unified, queue)) { + if (queueDecorator.decorate(jedis, queue).removeFromDequeue(val)) { + return true; + } } } return false; } + private String take(Jedis jedis, Queue queue, Duration timeout, ExecutorService service) + throws InterruptedException { + return interruptibleRequest(() -> queue.take(timeout), jedis::disconnect, service); + } + + private T interruptibleRequest( + Callable command, Runnable onInterrupted, ExecutorService service) + throws InterruptedException { + Future reply = service.submit(command); + return getBlockingReply(reply, onInterrupted); + } + + private T getBlockingReply(Future reply, Runnable onInterrupted) + throws InterruptedException { + InterruptedException interruption = null; + for (; ; ) { + try { + return reply.get(); + } catch (ExecutionException e) { + Throwable cause = e.getCause(); + if (interruption != null) { + interruption.addSuppressed(cause); + Thread.currentThread().interrupt(); + throw interruption; + } + throw new RuntimeException(cause); + } catch (InterruptedException e) { + interruption = e; + Thread.interrupted(); + reply.cancel(true); + onInterrupted.run(); + } + } + } + /** * @brief Pop element into internal dequeue and return value. * @details This pops the element from one queue atomically into an internal list called the @@ -189,7 +221,7 @@ public boolean removeFromDequeue(JedisCluster jedis, String val) { * @return The value of the transfered element. null if the thread was interrupted. * @note Suggested return identifier: val. */ - public String dequeue(JedisCluster jedis) throws InterruptedException { + public String take(UnifiedJedis unified, ExecutorService service) throws InterruptedException { // The conditions of this algorithm are as followed: // - from a client's perspective we want to block indefinitely. // (so this function should not return null under any normal circumstances.) @@ -211,24 +243,35 @@ public String dequeue(JedisCluster jedis) throws InterruptedException { // end this phase if we have done a full round-robin boolean blocking = false; // try each of the internal queues with exponential backoff - int currentTimeout_s = START_TIMEOUT_SECONDS; + Duration currentTimeout = START_TIMEOUT; while (true) { final String val; - QueueInterface queue = queues.get(roundRobinPopIndex()); - if (blocking) { - val = queue.dequeue(jedis, currentTimeout_s); - } else { - val = queue.nonBlockingDequeue(jedis); + String queueName = queues.get(roundRobinPopIndex()); + try (Jedis jedis = getJedisFromKey(unified, queueName)) { + Queue queue = queueDecorator.decorate(jedis, queueName); + if (blocking) { + val = take(jedis, queue, currentTimeout, service); + } else { + val = queue.poll(); + } } // return if found if (val != null) { return val; } + // not quite immediate yet... 
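getBlockingReply above implements a general trick: run the blocking redis command on an executor so the calling thread stays interruptible, and on interrupt cancel the future and disconnect the underlying socket so the blocked read fails fast. Reduced to its essentials (blockingCall and disconnect are placeholders; the real version also re-waits on the reply so the worker's resulting failure is observed and suppressed):

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

static <T> T interruptibly(ExecutorService service, Callable<T> blockingCall, Runnable disconnect)
    throws InterruptedException, ExecutionException {
  Future<T> reply = service.submit(blockingCall);
  try {
    return reply.get();
  } catch (InterruptedException e) {
    reply.cancel(true); // stop waiting on our side
    disconnect.run(); // unblock the worker thread's socket read
    throw e;
  }
}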
+ if (Thread.currentThread().isInterrupted()) { + throw new InterruptedException(); + } + if (currentPopQueue == startQueue) { // advance timeout if blocking on queue and not at max each queue cycle if (blocking) { - currentTimeout_s = Math.min(currentTimeout_s * 2, MAX_TIMEOUT_SECONDS); + currentTimeout = currentTimeout.multipliedBy(2); + if (currentTimeout.compareTo(MAX_TIMEOUT) > 0) { + currentTimeout = MAX_TIMEOUT; + } } else { blocking = true; } @@ -236,13 +279,41 @@ } } + private static Jedis getJedisFromKey(UnifiedJedis jedis, String name) { + Connection connection = null; + if (jedis instanceof JedisCluster) { + JedisCluster cluster = (JedisCluster) jedis; + connection = cluster.getConnectionFromSlot(JedisClusterCRC16.getSlot(name)); + } else if (jedis instanceof JedisPooled) { + JedisPooled pooled = (JedisPooled) jedis; + connection = pooled.getPool().getResource(); + } + if (connection == null) { + throw new IllegalArgumentException(jedis.toString()); + } + return new Jedis(connection); + } + + /** + * @brief Pop element into internal dequeue and return value. + * @details Null is returned if the queue is empty. + * @return The value of the transferred element. null if queue is empty or thread was interrupted. + * @note Suggested return identifier: val. + */ + public @Nullable String poll(UnifiedJedis unified) { + String queue = queues.get(roundRobinPopIndex()); + try (Jedis jedis = getJedisFromKey(unified, queue)) { + return queueDecorator.decorate(jedis, queue).poll(); + } + } + /** * @brief Get the current pop queue. * @details Get the queue that the balanced queue intends to pop from next. * @return The queue that the balanced queue intends to pop from next. * @note Suggested return identifier: currentPopQueue. */ - public QueueInterface getCurrentPopQueue() { + public String getCurrentPopQueue() { return queues.get(currentPopQueue); } @@ -263,7 +334,7 @@ public int getCurrentPopQueueIndex() { * @return The internal queue found at that index. * @note Suggested return identifier: internalQueue. */ - public QueueInterface getInternalQueue(int index) { + public String getInternalQueue(int index) { return queues.get(index); } @@ -289,19 +360,35 @@ public String getName() { return name; } + // annoying that there's no inject/accumulate + private static long size(Iterable<Long> sizes) { + long size = 0; + for (long s : sizes) { + size += s; + } + return size; + } + /** * @brief Get size. * @details Checks the current length of the queue. * @return The current length of the queue. * @note Suggested return identifier: length. */ - public long size(JedisCluster jedis) { + public long size(UnifiedJedis unified) { // the accumulated size of all of the queues - long size = 0; - for (QueueInterface queue : queues) { - size += queue.size(jedis); + return size(sizes(unified)); + } + + private long size(UnifiedJedis unified, String queue) { + try (Jedis jedis = getJedisFromKey(unified, queue)) { + return queueDecorator.decorate(jedis, queue).size(); } - return size; + } + + private Iterable<Long> sizes(UnifiedJedis unified) { + // this could be done in parallel + return transform(queues, queue -> size(unified, queue)); } /** @@ -310,16 +397,16 @@ public long size(JedisCluster jedis) { * @return The current status of the queue. * @note Suggested return identifier: status.
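getJedisFromKey above is the piece that makes blocking commands work against a cluster: it hashes the key to its slot and borrows a raw connection to the owning node, so BLMOVE and EVAL run where the data lives. In isolation, assuming cluster is an established JedisCluster:

import redis.clients.jedis.Jedis;
import redis.clients.jedis.util.JedisClusterCRC16;

int slot = JedisClusterCRC16.getSlot("{06S}operation_queue"); // only the braces are hashed
try (Jedis jedis = new Jedis(cluster.getConnectionFromSlot(slot))) {
  long length = jedis.llen("{06S}operation_queue"); // served by the node owning that slot
}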
*/ - public QueueStatus status(JedisCluster jedis) { + public QueueStatus status(UnifiedJedis unified) { // get properties - long size = size(jedis); - List sizes = new ArrayList<>(); - for (QueueInterface queue : queues) { - sizes.add(queue.size(jedis)); - } + Iterable sizes = sizes(unified); // build proto - return QueueStatus.newBuilder().setName(name).setSize(size).addAllInternalSizes(sizes).build(); + return QueueStatus.newBuilder() + .setName(RedisHashtags.hashedName(name, originalHashtag)) + .setSize(size(sizes)) + .addAllInternalSizes(sizes) + .build(); } /** @@ -327,9 +414,11 @@ public QueueStatus status(JedisCluster jedis) { * @details Enacts a visitor over each element in the queue. * @param visitor A visitor for each visited element in the queue. */ - public void visit(JedisCluster jedis, StringVisitor visitor) { - for (QueueInterface queue : fullIterationQueueOrder()) { - queue.visit(jedis, visitor); + public void visit(UnifiedJedis unified, StringVisitor visitor) { + for (String queue : fullIterationQueueOrder()) { + try (Jedis jedis = getJedisFromKey(unified, queue)) { + queueDecorator.decorate(jedis, queue).visit(visitor); + } } } @@ -338,9 +427,11 @@ public void visit(JedisCluster jedis, StringVisitor visitor) { * @details Enacts a visitor over each element in the dequeue. * @param visitor A visitor for each visited element in the queue. */ - public void visitDequeue(JedisCluster jedis, StringVisitor visitor) { - for (QueueInterface queue : fullIterationQueueOrder()) { - queue.visitDequeue(jedis, visitor); + public void visitDequeue(UnifiedJedis unified, StringVisitor visitor) { + for (String queue : fullIterationQueueOrder()) { + try (Jedis jedis = getJedisFromKey(unified, queue)) { + queueDecorator.decorate(jedis, queue).visitDequeue(visitor); + } } } @@ -352,12 +443,13 @@ public void visitDequeue(JedisCluster jedis, StringVisitor visitor) { * @return Whether or not the queues values are evenly distributed by internal queues. * @note Suggested return identifier: isEvenlyDistributed. */ - public boolean isEvenlyDistributed(JedisCluster jedis) { - long size = queues.get(0).size(jedis); - for (QueueInterface queue : partialIterationQueueOrder()) { - if (queue.size(jedis) != size) { + public boolean isEvenlyDistributed(UnifiedJedis unified) { + long size = -1; + for (long queueSize : sizes(unified)) { + if (size != -1 && queueSize != size) { return false; } + size = queueSize; } return true; } @@ -370,7 +462,7 @@ public boolean isEvenlyDistributed(JedisCluster jedis) { * @param jedis Jedis cluster client. * @return Whether are not a new element can be added to the queue based on its current size. */ - public boolean canQueue(JedisCluster jedis) { + public boolean canQueue(UnifiedJedis jedis) { return maxQueueSize < 0 || size(jedis) < maxQueueSize; } @@ -380,12 +472,9 @@ public boolean canQueue(JedisCluster jedis) { * @param name The global name of the queue. * @param hashtags Hashtags to distribute queue data. 
*/ - private void createHashedQueues(String name, List hashtags, Queue.QUEUE_TYPE queueType) { - // create an internal queue for each of the provided hashtags - for (String hashtag : hashtags) { - queues.add( - new RedisQueueFactory().getQueue(queueType, RedisHashtags.hashedName(name, hashtag))); - } + private static List createHashedQueues(String name, List hashtags) { + String unhashedName = RedisHashtags.unhashedName(name); + ImmutableList.Builder queues = ImmutableList.builder(); // if there were no hashtags, we'll create a single internal queue // so that the balanced redis queue can still function. // we'll use the basename provided to create the single internal queue and use the original @@ -394,15 +483,14 @@ private void createHashedQueues(String name, List hashtags, Queue.QUEUE_ // note: we must build the balanced queues internal queue with a hashtag because it will dequeue // to the same redis slot. if (hashtags.isEmpty()) { - if (!originalHashtag.isEmpty()) { - queues.add( - new RedisQueueFactory() - .getQueue(queueType, RedisHashtags.hashedName(name, originalHashtag))); - } else { - queues.add( - new RedisQueueFactory().getQueue(queueType, RedisHashtags.hashedName(name, "06S"))); - } + String originalHashtag = RedisHashtags.existingHash(name); + hashtags = ImmutableList.of(originalHashtag.isEmpty() ? "06S" : originalHashtag); + } + // create an internal queue for each of the provided hashtags + for (String hashtag : hashtags) { + queues.add(RedisHashtags.hashedName(unhashedName, hashtag)); } + return queues.build(); } /** @@ -411,7 +499,7 @@ private void createHashedQueues(String name, List hashtags, Queue.QUEUE_ * @return The current round-robin index. * @note Suggested return identifier: queueIndex. */ - private int roundRobinPushIndex() { + private synchronized int roundRobinPushIndex() { int currentIndex = currentPushQueue; currentPushQueue = nextQueueInRoundRobin(currentPushQueue); return currentIndex; @@ -423,7 +511,7 @@ private int roundRobinPushIndex() { * @return The current round-robin index. * @note Suggested return identifier: queueIndex. */ - private int roundRobinPopIndex() { + private synchronized int roundRobinPopIndex() { int currentIndex = currentPopQueue; currentPopQueue = nextQueueInRoundRobin(currentPopQueue); return currentIndex; @@ -451,7 +539,7 @@ private int nextQueueInRoundRobin(int index) { * @return An ordered list of queues. * @note Suggested return identifier: queues. */ - private List fullIterationQueueOrder() { + private List fullIterationQueueOrder() { // if we are going to iterate over all of the queues // there will be no noticeable side effects from the order return queues; @@ -467,11 +555,11 @@ private List fullIterationQueueOrder() { * @return An ordered list of queues. * @note Suggested return identifier: queues. 
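createHashedQueues now only computes key names; the point of the hashtag is that redis hashes just the {...} substring, so a member queue and its companion dequeue list land in the same cluster slot and elements can be moved between them atomically. A sketch of the invariant, assuming hashedName emits the conventional {tag}name form and the dequeue key is derived by suffixing the queue name:

import redis.clients.jedis.util.JedisClusterCRC16;

String queue = RedisHashtags.hashedName("operation_queue", "06S"); // "{06S}operation_queue"
String dequeue = queue + "_dequeue"; // same {06S} tag, therefore same slot
assert JedisClusterCRC16.getSlot(queue) == JedisClusterCRC16.getSlot(dequeue);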
*/ - private List partialIterationQueueOrder() { + private List partialIterationQueueOrder() { // to improve cpu utilization, we can try randomizing // the order we traverse the internal queues for operations // that may return early - List randomQueues = new ArrayList<>(queues); + List randomQueues = new ArrayList<>(queues); Collections.shuffle(randomQueues); return randomQueues; } diff --git a/src/main/java/build/buildfarm/common/redis/ProvisionedRedisQueue.java b/src/main/java/build/buildfarm/common/redis/ProvisionedRedisQueue.java index 75d3d2cc0c..2c65ad3922 100644 --- a/src/main/java/build/buildfarm/common/redis/ProvisionedRedisQueue.java +++ b/src/main/java/build/buildfarm/common/redis/ProvisionedRedisQueue.java @@ -16,7 +16,6 @@ import build.buildfarm.common.ExecutionProperties; import build.buildfarm.common.MapUtils; -import build.buildfarm.common.config.Queue; import com.google.common.collect.ImmutableSet; import com.google.common.collect.ImmutableSetMultimap; import com.google.common.collect.SetMultimap; @@ -91,7 +90,7 @@ public class ProvisionedRedisQueue { */ public ProvisionedRedisQueue( String name, List hashtags, SetMultimap filterProvisions) { - this(name, Queue.QUEUE_TYPE.standard, hashtags, filterProvisions, false); + this(name, RedisQueue::decorate, hashtags, filterProvisions, false); } /** @@ -109,7 +108,7 @@ public ProvisionedRedisQueue( List hashtags, SetMultimap filterProvisions, boolean allowUserUnmatched) { - this(name, Queue.QUEUE_TYPE.standard, hashtags, filterProvisions, allowUserUnmatched); + this(name, RedisQueue::decorate, hashtags, filterProvisions, allowUserUnmatched); } /** @@ -123,10 +122,10 @@ public ProvisionedRedisQueue( */ public ProvisionedRedisQueue( String name, - Queue.QUEUE_TYPE type, + QueueDecorator queueDecorator, List hashtags, SetMultimap filterProvisions) { - this(name, type, hashtags, filterProvisions, false); + this(name, queueDecorator, hashtags, filterProvisions, false); } /** @@ -142,11 +141,11 @@ public ProvisionedRedisQueue( */ public ProvisionedRedisQueue( String name, - Queue.QUEUE_TYPE type, + QueueDecorator queueDecorator, List hashtags, SetMultimap filterProvisions, boolean allowUserUnmatched) { - this.queue = new BalancedRedisQueue(name, hashtags, type); + this.queue = new BalancedRedisQueue(name, hashtags, queueDecorator); isFullyWildcard = filterProvisions.containsKey(WILDCARD_VALUE); provisions = filterProvisionsByWildcard(filterProvisions, isFullyWildcard); this.allowUserUnmatched = allowUserUnmatched; diff --git a/src/main/java/build/buildfarm/common/redis/QueueDecorator.java b/src/main/java/build/buildfarm/common/redis/QueueDecorator.java new file mode 100644 index 0000000000..3f8e34fc93 --- /dev/null +++ b/src/main/java/build/buildfarm/common/redis/QueueDecorator.java @@ -0,0 +1,8 @@ +package build.buildfarm.common.redis; + +import build.buildfarm.common.Queue; +import redis.clients.jedis.Jedis; + +public interface QueueDecorator { + Queue decorate(Jedis jedis, String name); +} diff --git a/src/main/java/build/buildfarm/common/redis/QueueInterface.java b/src/main/java/build/buildfarm/common/redis/QueueInterface.java deleted file mode 100644 index b764d5a40a..0000000000 --- a/src/main/java/build/buildfarm/common/redis/QueueInterface.java +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2020-2022 The Bazel Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package build.buildfarm.common.redis; - -import build.buildfarm.common.StringVisitor; -import redis.clients.jedis.JedisCluster; - -/** - * @class QueueInterface - * @brief A redis queue interface. - */ -public abstract class QueueInterface { - /** - * @brief Push a value onto the queue with default priority of 1. - * @details Adds the value into the backend rdered set. - * @param val The value to push onto the priority queue. - */ - abstract void push(JedisCluster jedis, String val); - - /** - * @brief Push a value onto the queue with defined priority. - * @details Adds the value into the backend rdered set. - * @param val The value to push onto the priority queue. - */ - abstract void push(JedisCluster jedis, String val, double priority); - - /** - * @brief Remove element from dequeue. - * @details Removes an element from the dequeue and specifies whether it was removed. - * @param val The value to remove. - * @return Whether or not the value was removed. - * @note Suggested return identifier: wasRemoved. - */ - abstract boolean removeFromDequeue(JedisCluster jedis, String val); - - /** - * @brief Remove all elements that match from queue. - * @details Removes all matching elements from the queue and specifies whether it was removed. - * @param val The value to remove. - * @return Whether or not the value was removed. - * @note Suggested return identifier: wasRemoved. - */ - abstract boolean removeAll(JedisCluster jedis, String val); - - /** - * @brief Pop element into internal dequeue and return value. - * @details This pops the element from one queue atomically into an internal list called the - * dequeue. It will wait until the timeout has expired. Null is returned if the timeout has - * expired. - * @param timeout_s Timeout to wait if there is no item to dequeue. (units: seconds (s)) - * @return The value of the transfered element. null if the thread was interrupted. - * @note Overloaded. - * @note Suggested return identifier: val. - */ - abstract String dequeue(JedisCluster jedis, int timeout_s) throws InterruptedException; - - /** - * @brief Pop element into internal dequeue and return value. - * @details This pops the element from one queue atomically into an internal list called the - * dequeue. It does not block and null is returned if there is nothing to dequeue. - * @return The value of the transfered element. null if nothing was dequeued. - * @note Suggested return identifier: val. - */ - abstract String nonBlockingDequeue(JedisCluster jedis) throws InterruptedException; - - /** - * @brief Get name. - * @details Get the name of the queue. this is the redis key used for the list. - * @return The name of the queue. - * @note Suggested return identifier: name. - */ - abstract String getName(); - - /** - * @brief Get dequeue name. - * @details Get the name of the internal dequeue used by the queue. this is the redis key used for - * the list. - * @return The name of the queue. - * @note Suggested return identifier: name. - */ - abstract String getDequeueName(); - - /** - * @brief Get size. - * @details Checks the current length of the queue. 
- * @return The current length of the queue. - * @note Suggested return identifier: length. - */ - abstract long size(JedisCluster jedis); - - /** - * @brief Visit each element in the queue. - * @details Enacts a visitor over each element in the queue. - * @param visitor A visitor for each visited element in the queue. - * @note Overloaded. - */ - abstract void visit(JedisCluster jedis, StringVisitor visitor); - - /** - * @brief Visit each element in the dequeue. - * @details Enacts a visitor over each element in the dequeue. - * @param visitor A visitor for each visited element in the queue. - */ - abstract void visitDequeue(JedisCluster jedis, StringVisitor visitor); -} diff --git a/src/main/java/build/buildfarm/common/redis/RedisClient.java b/src/main/java/build/buildfarm/common/redis/RedisClient.java index acdc134f6c..087bbd88b4 100644 --- a/src/main/java/build/buildfarm/common/redis/RedisClient.java +++ b/src/main/java/build/buildfarm/common/redis/RedisClient.java @@ -24,6 +24,8 @@ import java.net.SocketTimeoutException; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; +import redis.clients.jedis.UnifiedJedis; +import redis.clients.jedis.exceptions.JedisClusterOperationException; import java.util.function.Supplier; import java.util.logging.Level; import lombok.extern.java.Log; @@ -31,7 +33,6 @@ import redis.clients.jedis.exceptions.JedisConnectionException; import redis.clients.jedis.exceptions.JedisDataException; import redis.clients.jedis.exceptions.JedisException; -import redis.clients.jedis.exceptions.JedisNoReachableClusterNodeException; /** * @class RedisClient @@ -57,12 +58,12 @@ public class RedisClient implements Closeable { @FunctionalInterface public interface JedisContext { - T run(JedisCluster jedis) throws JedisException; + T run(UnifiedJedis jedis) throws JedisException; } @FunctionalInterface public interface JedisInterruptibleContext { - T run(JedisCluster jedis) throws InterruptedException, JedisException; + T run(UnifiedJedis jedis) throws InterruptedException, JedisException; } private static class JedisMisconfigurationException extends JedisDataException { @@ -80,27 +81,26 @@ public JedisMisconfigurationException(final String message, final Throwable caus } // We store the factory in case we want to re-create the jedis client. - private Supplier jedisClusterFactory; + private Supplier unifiedJedisFactory; - // The jedis client. 
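The switch from JedisCluster to UnifiedJedis works because, in Jedis 4+, both JedisCluster and the single-node JedisPooled extend UnifiedJedis, so RedisClient no longer cares which deployment it talks to. A hedged factory sketch, where host, port, and clusterMode are assumed configuration values:

import java.util.function.Supplier;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisCluster;
import redis.clients.jedis.JedisPooled;
import redis.clients.jedis.UnifiedJedis;

Supplier<UnifiedJedis> factory =
    () -> clusterMode
        ? new JedisCluster(new HostAndPort(host, port))
        : new JedisPooled(host, port);
RedisClient client =
    new RedisClient(factory, /* reconnectClientAttempts= */ 5, /* reconnectClientWaitDurationMs= */ 1000);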
- private JedisCluster jedis; + private UnifiedJedis jedis; private boolean closed = false; - public RedisClient(JedisCluster jedis) { + public RedisClient(UnifiedJedis jedis) { this.jedis = jedis; } public RedisClient( - Supplier jedisClusterFactory, + Supplier unifiedJedisFactory, int reconnectClientAttempts, int reconnectClientWaitDurationMs) { try { - this.jedis = jedisClusterFactory.get(); + this.jedis = unifiedJedisFactory.get(); } catch (Exception e) { log.log(Level.SEVERE, "Unable to establish redis client: " + e.toString()); } - this.jedisClusterFactory = jedisClusterFactory; + this.unifiedJedisFactory = unifiedJedisFactory; this.reconnectClientAttempts = reconnectClientAttempts; this.reconnectClientWaitDurationMs = reconnectClientWaitDurationMs; } @@ -121,7 +121,7 @@ private synchronized void throwIfClosed() throws IOException { } } - public void run(Consumer withJedis) throws IOException { + public void run(Consumer withJedis) throws IOException { call( (JedisContext) jedis -> { @@ -197,7 +197,7 @@ private T callImpl(JedisContext withJedis) throws IOException { private void rebuildJedisCluser() { try { log.log(Level.SEVERE, "Rebuilding redis client"); - jedis = jedisClusterFactory.get(); + jedis = unifiedJedisFactory.get(); } catch (Exception e) { redisClientRebuildErrorCounter.inc(); log.log(Level.SEVERE, "Failed to rebuild redis client"); @@ -216,7 +216,7 @@ private T defaultCall(JedisContext withJedis) throws IOException { } throw e; } - } catch (JedisMisconfigurationException | JedisNoReachableClusterNodeException e) { + } catch (JedisMisconfigurationException | JedisClusterOperationException e) { // In regards to a Jedis misconfiguration, // the backplane is configured not to accept writes currently // as a result of an error. The error is meant to indicate diff --git a/src/main/java/build/buildfarm/common/redis/RedisHashMap.java b/src/main/java/build/buildfarm/common/redis/RedisHashMap.java index 64ab462fbe..e43dd385cf 100644 --- a/src/main/java/build/buildfarm/common/redis/RedisHashMap.java +++ b/src/main/java/build/buildfarm/common/redis/RedisHashMap.java @@ -14,10 +14,12 @@ package build.buildfarm.common.redis; +import com.google.common.collect.Iterables; +import java.util.List; import java.util.Map; import java.util.Set; -import redis.clients.jedis.JedisCluster; -import redis.clients.jedis.JedisClusterPipeline; +import redis.clients.jedis.PipelineBase; +import redis.clients.jedis.UnifiedJedis; /** * @class RedisHashMap @@ -55,7 +57,7 @@ public RedisHashMap(String name) { * @return Whether a new key was inserted. If a key is overwritten with a new value, this would be * false. */ - public boolean insert(JedisCluster jedis, String key, String value) { + public boolean insert(UnifiedJedis jedis, String key, String value) { return jedis.hset(name, key, value) == 1; } @@ -67,7 +69,7 @@ public boolean insert(JedisCluster jedis, String key, String value) { * @param value The value for the key. * @return Whether a new key was inserted. If a key already exists, this would be false. */ - public boolean insertIfMissing(JedisCluster jedis, String key, String value) { + public boolean insertIfMissing(UnifiedJedis jedis, String key, String value) { return jedis.hsetnx(name, key, value) == 1; } @@ -78,7 +80,7 @@ public boolean insertIfMissing(JedisCluster jedis, String key, String value) { * @param key The name of the key. * @return Whether the key exists or not in the map. 
*/ - public boolean exists(JedisCluster jedis, String key) { + public boolean exists(UnifiedJedis jedis, String key) { return jedis.hexists(name, key); } @@ -89,7 +91,7 @@ public boolean exists(JedisCluster jedis, String key) { * @param key The name of the key. * @return Whether the key was removed. */ - public boolean remove(JedisCluster jedis, String key) { + public boolean remove(UnifiedJedis jedis, String key) { return jedis.hdel(name, key) == 1; } @@ -99,12 +101,12 @@ public boolean remove(JedisCluster jedis, String key) { * @param jedis Jedis cluster client. * @param key The names of the keys. */ - public void remove(JedisCluster jedis, Iterable keys) { - JedisClusterPipeline p = jedis.pipelined(); - for (String key : keys) { - p.hdel(name, key); + public void remove(UnifiedJedis jedis, Iterable keys) { + try (PipelineBase p = jedis.pipelined()) { + for (String key : keys) { + p.hdel(name, key); + } } - p.sync(); } /** @@ -113,7 +115,7 @@ public void remove(JedisCluster jedis, Iterable keys) { * @return The size of the map. * @note Suggested return identifier: size. */ - public long size(JedisCluster jedis) { + public long size(UnifiedJedis jedis) { return jedis.hlen(name); } @@ -123,7 +125,7 @@ public long size(JedisCluster jedis) { * @param jedis Jedis cluster client. * @return The redis hashmap keys represented as a set. */ - public Set keys(JedisCluster jedis) { + public Set keys(UnifiedJedis jedis) { return jedis.hkeys(name); } @@ -133,7 +135,17 @@ public Set keys(JedisCluster jedis) { * @param jedis Jedis cluster client. * @return The redis hashmap represented as a java map. */ - public Map asMap(JedisCluster jedis) { + public Map asMap(UnifiedJedis jedis) { return jedis.hgetAll(name); } + + /** + * @brief Get values associated with the specified fields from the hashmap. + * @param jedis Jedis cluster client. + * @param fields The name of the fields. + * @return Values associated with the specified fields + */ + public List mget(UnifiedJedis jedis, Iterable fields) { + return jedis.hmget(name, Iterables.toArray(fields, String.class)); + } } diff --git a/src/main/java/build/buildfarm/common/redis/RedisMap.java b/src/main/java/build/buildfarm/common/redis/RedisMap.java index dcc6741246..8fd2e96158 100644 --- a/src/main/java/build/buildfarm/common/redis/RedisMap.java +++ b/src/main/java/build/buildfarm/common/redis/RedisMap.java @@ -20,9 +20,9 @@ import java.util.List; import java.util.Map; import java.util.stream.StreamSupport; -import redis.clients.jedis.JedisCluster; -import redis.clients.jedis.JedisClusterPipeline; +import redis.clients.jedis.PipelineBase; import redis.clients.jedis.Response; +import redis.clients.jedis.UnifiedJedis; /** * @class RedisMap @@ -78,7 +78,7 @@ public RedisMap(String name, int timeout_s) { * @param timeout_s Timeout to expire the entry. (units: seconds (s)) * @note Overloaded. */ - public void insert(JedisCluster jedis, String key, String value, int timeout_s) { + public void insert(UnifiedJedis jedis, String key, String value, int timeout_s) { jedis.setex(createKeyName(key), timeout_s, value); } @@ -91,7 +91,7 @@ public void insert(JedisCluster jedis, String key, String value, int timeout_s) * @param timeout_s Timeout to expire the entry. (units: seconds (s)) * @note Overloaded. */ - public void insert(JedisCluster jedis, String key, String value, long timeout_s) { + public void insert(UnifiedJedis jedis, String key, String value, long timeout_s) { // Jedis only provides int precision. this is fine as the units are seconds. 
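The try-with-resources pipelines in these hunks lean on PipelineBase being AutoCloseable in Jedis 4+, with close() flushing whatever is buffered; fire-and-forget batches like remove() need no explicit sync(), while batches that read responses back still call sync() before dereferencing them. A minimal batch-delete sketch under that assumption:

import redis.clients.jedis.PipelineBase;
import redis.clients.jedis.UnifiedJedis;

void removeAll(UnifiedJedis jedis, Iterable<String> staleKeys) {
  try (PipelineBase p = jedis.pipelined()) {
    for (String key : staleKeys) {
      p.del(key); // queued locally, not yet sent
    }
  } // closing the pipeline flushes the whole batch
}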
// We supply an interface for longs as a convenience to callers. jedis.setex(createKeyName(key), (int) timeout_s, value); @@ -105,7 +105,7 @@ public void insert(JedisCluster jedis, String key, String value, long timeout_s) * @param value The value for the key. * @note Overloaded. */ - public void insert(JedisCluster jedis, String key, String value) { + public void insert(UnifiedJedis jedis, String key, String value) { // Jedis only provides int precision. this is fine as the units are seconds. // We supply an interface for longs as a convenience to callers. jedis.setex(createKeyName(key), expiration_s, value); @@ -118,7 +118,7 @@ public void insert(JedisCluster jedis, String key, String value) { * @param key The name of the key. * @note Overloaded. */ - public void remove(JedisCluster jedis, String key) { + public void remove(UnifiedJedis jedis, String key) { jedis.del(createKeyName(key)); } @@ -129,12 +129,12 @@ public void remove(JedisCluster jedis, String key) { * @param keys The name of the keys. * @note Overloaded. */ - public void remove(JedisCluster jedis, Iterable keys) { - JedisClusterPipeline p = jedis.pipelined(); - for (String key : keys) { - p.del(createKeyName(key)); + public void remove(UnifiedJedis jedis, Iterable keys) { + try (PipelineBase p = jedis.pipelined()) { + for (String key : keys) { + p.del(createKeyName(key)); + } } - p.sync(); } /** @@ -146,7 +146,7 @@ public void remove(JedisCluster jedis, Iterable keys) { * @note Overloaded. * @note Suggested return identifier: value. */ - public String get(JedisCluster jedis, String key) { + public String get(UnifiedJedis jedis, String key) { return jedis.get(createKeyName(key)); } @@ -159,19 +159,21 @@ public String get(JedisCluster jedis, String key) { * @note Overloaded. * @note Suggested return identifier: values. */ - public Iterable> get(JedisCluster jedis, Iterable keys) { + public Iterable> get(UnifiedJedis jedis, Iterable keys) { // Fetch items via pipeline - JedisClusterPipeline p = jedis.pipelined(); - List>> values = new ArrayList<>(); - StreamSupport.stream(keys.spliterator(), false) - .forEach(key -> values.add(new AbstractMap.SimpleEntry<>(key, p.get(createKeyName(key))))); - p.sync(); - - List> resolved = new ArrayList<>(); - for (Map.Entry> val : values) { - resolved.add(new AbstractMap.SimpleEntry<>(val.getKey(), val.getValue().get())); + try (PipelineBase p = jedis.pipelined()) { + List>> values = new ArrayList<>(); + StreamSupport.stream(keys.spliterator(), false) + .forEach( + key -> values.add(new AbstractMap.SimpleEntry<>(key, p.get(createKeyName(key))))); + p.sync(); + + List> resolved = new ArrayList<>(); + for (Map.Entry> val : values) { + resolved.add(new AbstractMap.SimpleEntry<>(val.getKey(), val.getValue().get())); + } + return resolved; } - return resolved; } /** @@ -182,7 +184,7 @@ public Iterable> get(JedisCluster jedis, Iterable>> SINGLETON_NODE_SLOT_RANGES = + ImmutableList.of(ImmutableList.of(ImmutableList.of(0l, CLUSTER_HASHSLOTS - 1l))); + /** * @brief Get a list of evenly distributing hashtags for the provided redis cluster. * @details Each hashtag will map to a slot on a different node. @@ -38,19 +46,22 @@ public class RedisNodeHashes { * @return Hashtags that will each has to a slot on a different node. * @note Suggested return identifier: hashtags. 
*/ - @SuppressWarnings({"unchecked", "rawtypes"}) - public static List getEvenlyDistributedHashes(JedisCluster jedis) { - try { - List> slotRanges = getSlotRanges(jedis); - ImmutableList.Builder hashTags = ImmutableList.builder(); - for (List slotRange : slotRanges) { - // we can use any slot that is in range for the node. - // in this case, we will use the first slot. - hashTags.add(RedisSlotToHash.correlate(slotRange.get(0))); + public static List getEvenlyDistributedHashes(UnifiedJedis jedis) { + if (jedis instanceof JedisCluster) { + try { + Iterable>> nodeSlotRanges = getNodeSlotRanges(jedis); + ImmutableList.Builder hashTags = ImmutableList.builder(); + for (List> slotRanges : nodeSlotRanges) { + // we can use any slot that is in range for the node. + // in this case, we will use the first slot in the first range. + hashTags.add(RedisSlotToHash.correlate(slotRanges.get(0).get(0))); + } + return hashTags.build(); + } catch (JedisException e) { + return ImmutableList.of(); } - return hashTags.build(); - } catch (JedisException e) { - return ImmutableList.of(); + } else { + return ImmutableList.of(""); } } @@ -62,21 +73,24 @@ public static List getEvenlyDistributedHashes(JedisCluster jedis) { * @return Hashtags that will each has to a slot on a different node. * @note Suggested return identifier: hashtags. */ - @SuppressWarnings({"unchecked", "rawtypes"}) public static List getEvenlyDistributedHashesWithPrefix( - JedisCluster jedis, String prefix) { - try { - List> slotRanges = getSlotRanges(jedis); - ImmutableList.Builder hashTags = ImmutableList.builder(); - for (List slotRange : slotRanges) { - // we can use any slot that is in range for the node. - // in this case, we will use the first slot. - hashTags.add( - RedisSlotToHash.correlateRangeWithPrefix(slotRange.get(0), slotRange.get(1), prefix)); + UnifiedJedis jedis, String prefix) { + if (jedis instanceof JedisCluster) { + JedisCluster cluster = (JedisCluster) jedis; + Iterable>> nodeSlotRanges = getNodeSlotRanges(cluster); + try { + ImmutableList.Builder hashTags = ImmutableList.builder(); + for (List> slotRanges : nodeSlotRanges) { + // we can use any slot that is in range for the node. + // in this case, we will use the first slot. + hashTags.add(RedisSlotToHash.correlateRangesWithPrefix(slotRanges, prefix)); + } + return hashTags.build(); + } catch (JedisException e) { + return ImmutableList.of(); } - return hashTags.build(); - } catch (JedisException e) { - return ImmutableList.of(); + } else { + return ImmutableList.of(prefix); } } @@ -85,48 +99,30 @@ public static List getEvenlyDistributedHashesWithPrefix( * @details This information can be found from any of the redis nodes in the cluster. * @param jedis An established jedis client. * @return Slot ranges for all of the nodes in the cluster. - * @note Suggested return identifier: slotRanges. + * @note Suggested return identifier: nodeSlotRanges. 
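The move from clusterSlots() to clusterShards() also changes shape: CLUSTER SHARDS reports each shard's slots as a list of [start, end] pairs rather than one flat range, which is why the code now carries a nested list of slot ranges per node. Reading it directly looks roughly like this (pool is an assumed ConnectionPool; ClusterShardInfo sits in the resps package in recent Jedis releases):

import java.util.List;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.resps.ClusterShardInfo;

try (Jedis node = new Jedis(pool.getResource())) {
  for (ClusterShardInfo shard : node.clusterShards()) {
    for (List<Long> range : shard.getSlots()) {
      System.out.printf("slots %d-%d%n", range.get(0), range.get(1));
    }
  }
}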
*/ - @SuppressWarnings("unchecked") - private static List> getSlotRanges(JedisCluster jedis) { - // get slot information for each node - List slots = getClusterSlots(jedis); - - // convert slot information into a list of slot ranges - ImmutableList.Builder> slotRanges = ImmutableList.builder(); - for (Object slotInfoObj : slots) { - List slotInfo = (List) slotInfoObj; - List slotNums = slotInfoToSlotRange(slotInfo); - slotRanges.add(slotNums); + private static Iterable>> getNodeSlotRanges(UnifiedJedis jedis) { + if (jedis instanceof JedisCluster) { + // get slot range information for each shard + return transform(getClusterShards((JedisCluster) jedis), ClusterShardInfo::getSlots); + } else { + return SINGLETON_NODE_SLOT_RANGES; } - - return slotRanges.build(); - } - - /** - * @brief Convert a jedis slotInfo object to a range or slot numbers. - * @details Every redis node has a range of slots represented as integers. - * @param slotInfo Slot info objects from a redis node. - * @return The slot number range for the particular redis node. - * @note Suggested return identifier: slotRange. - */ - private static List slotInfoToSlotRange(List slotInfo) { - return ImmutableList.of((Long) slotInfo.get(0), (Long) slotInfo.get(1)); } /** - * @brief Query slot information for each redis node. + * @brief Query shard information from any redis node. * @details Obtains cluster information for understanding slot ranges for balancing. * @param jedis An established jedis client. * @return Cluster slot information. - * @note Suggested return identifier: clusterSlots. + * @note Suggested return identifier: clusterShards. */ - private static List getClusterSlots(JedisCluster jedis) { + private static List getClusterShards(JedisCluster jedis) { JedisException nodeException = null; - for (Map.Entry node : jedis.getClusterNodes().entrySet()) { - JedisPool pool = node.getValue(); - try (Jedis resource = pool.getResource()) { - return resource.clusterSlots(); + for (Map.Entry node : jedis.getClusterNodes().entrySet()) { + ConnectionPool pool = node.getValue(); + try (Jedis resource = new Jedis(pool.getResource())) { + return resource.clusterShards(); } catch (JedisException e) { nodeException = e; // log error with node @@ -135,6 +131,6 @@ private static List getClusterSlots(JedisCluster jedis) { if (nodeException != null) { throw nodeException; } - throw new JedisNoReachableClusterNodeException("No reachable node in cluster"); + throw new JedisClusterOperationException("No reachable node in cluster"); } } diff --git a/src/main/java/build/buildfarm/common/redis/RedisPriorityQueue.java b/src/main/java/build/buildfarm/common/redis/RedisPriorityQueue.java index 82ef45adbd..c2473992a8 100644 --- a/src/main/java/build/buildfarm/common/redis/RedisPriorityQueue.java +++ b/src/main/java/build/buildfarm/common/redis/RedisPriorityQueue.java @@ -14,22 +14,33 @@ package build.buildfarm.common.redis; +import build.buildfarm.common.Queue; import build.buildfarm.common.StringVisitor; -import java.util.Arrays; +import com.google.common.collect.ImmutableList; +import java.time.Clock; +import java.time.Duration; import java.util.List; -import java.util.Set; -import redis.clients.jedis.JedisCluster; +import redis.clients.jedis.Jedis; /** - * @class RedisQueue - * @brief A redis queue. + * @class RedisPriorityQueue + * @brief A redis priority queue. * @details A redis queue is an implementation of a queue data structure which internally uses redis * to store and distribute the data. 
Its important to know that the lifetime of the queue * persists before and after the queue data structure is created (since it exists in redis). * Therefore, two redis queues with the same name, would in fact be the same underlying redis * queue. */ -public class RedisPriorityQueue extends QueueInterface { +public class RedisPriorityQueue implements Queue { + private static final Clock defaultClock = Clock.systemUTC(); + private static final long defaultPollIntervalMillis = 100; + + public static Queue decorate(Jedis jedis, String name) { + return new RedisPriorityQueue(jedis, name); + } + + private final Jedis jedis; + /** * @field name * @brief The unique name of the queue. @@ -39,18 +50,16 @@ public class RedisPriorityQueue extends QueueInterface { private final String name; private final String script; - private Timestamp time; - private final List keys; + private final Clock clock; private final long pollIntervalMillis; - private static long defaultPollIntervalMillis = 100; /** * @brief Constructor. * @details Construct a named redis queue with an established redis cluster. * @param name The global name of the queue. */ - public RedisPriorityQueue(String name) { - this(name, new Timestamp(), defaultPollIntervalMillis); + public RedisPriorityQueue(Jedis jedis, String name) { + this(jedis, name, defaultPollIntervalMillis); } /** @@ -58,10 +67,9 @@ public RedisPriorityQueue(String name) { * @details Construct a named redis queue with an established redis cluster. Used to ease the * testing of the order of the queued actions * @param name The global name of the queue. - * @param time Timestamp of the operation. */ - public RedisPriorityQueue(String name, Timestamp time) { - this(name, time, defaultPollIntervalMillis); + public RedisPriorityQueue(Jedis jedis, String name, Clock clock) { + this(jedis, name, clock, defaultPollIntervalMillis); } /** @@ -71,8 +79,8 @@ public RedisPriorityQueue(String name, Timestamp time) { * @param name The global name of the queue. * @param pollIntervalMillis pollInterval to use when dqueuing from redis. */ - public RedisPriorityQueue(String name, long pollIntervalMillis) { - this(name, new Timestamp(), pollIntervalMillis); + public RedisPriorityQueue(Jedis jedis, String name, long pollIntervalMillis) { + this(jedis, name, defaultClock, pollIntervalMillis); } /** @@ -83,10 +91,10 @@ public RedisPriorityQueue(String name, long pollIntervalMillis) { * @param time Timestamp of the operation. * @param pollIntervalMillis pollInterval to use when dqueuing from redis. */ - public RedisPriorityQueue(String name, Timestamp time, long pollIntervalMillis) { + public RedisPriorityQueue(Jedis jedis, String name, Clock clock, long pollIntervalMillis) { + this.jedis = jedis; this.name = name; - this.time = time; - this.keys = Arrays.asList(name); + this.clock = clock; this.script = getLuaScript(); this.pollIntervalMillis = pollIntervalMillis; } @@ -97,19 +105,21 @@ public RedisPriorityQueue(String name, Timestamp time, long pollIntervalMillis) * @param val The value to push onto the priority queue. */ @Override - public void push(JedisCluster jedis, String val) { - push(jedis, val, 0); + public boolean offer(String val) { + return offer(val, 0); } /** * @brief Push a value onto the queue with specified priority. - * @details Adds the value into the backend redis ordered set. 
+ * @details Adds the value into the backend redis ordered set, with timestamp primary insertion to + * guarantee FIFO within a single priority level * @param val The value to push onto the priority queue. * @param priority The priority of action 0 means highest */ @Override - public void push(JedisCluster jedis, String val, double priority) { - jedis.zadd(name, priority, time.getNanos() + ":" + val); + public boolean offer(String val, double priority) { + jedis.zadd(name, priority, clock.millis() + ":" + val); + return true; } /** @@ -120,7 +130,7 @@ public void push(JedisCluster jedis, String val, double priority) { * @note Suggested return identifier: wasRemoved. */ @Override - public boolean removeFromDequeue(JedisCluster jedis, String val) { + public boolean removeFromDequeue(String val) { return jedis.lrem(getDequeueName(), -1, val) != 0; } @@ -131,8 +141,7 @@ public boolean removeFromDequeue(JedisCluster jedis, String val) { * @return Whether or not the value was removed. * @note Suggested return identifier: wasRemoved. */ - @Override - public boolean removeAll(JedisCluster jedis, String val) { + public boolean removeAll(String val) { return jedis.zrem(name, val) != 0; } @@ -141,18 +150,18 @@ public boolean removeAll(JedisCluster jedis, String val) { * @details This pops the element from one queue atomically into an internal list called the * dequeue. It will wait until the timeout has expired. Null is returned if the timeout has * expired. - * @param timeout_s Timeout to wait if there is no item to dequeue. (units: seconds (s)) + * @param timeout Timeout to wait if there is no item to dequeue. * @return The value of the transfered element. null if the thread was interrupted. * @note Overloaded. * @note Suggested return identifier: val. */ @Override - public String dequeue(JedisCluster jedis, int timeout_s) throws InterruptedException { - int maxAttempts = (int) (timeout_s / (pollIntervalMillis / 1000.0)); - List args = Arrays.asList(name, getDequeueName(), "true"); + public String take(Duration timeout) throws InterruptedException { + int maxAttempts = Math.max(1, (int) (timeout.toMillis() / pollIntervalMillis)); + List args = ImmutableList.of(name, getDequeueName(), "true"); String val; for (int i = 0; i < maxAttempts; ++i) { - Object obj_val = jedis.eval(script, keys, args); + Object obj_val = jedis.eval(script, ImmutableList.of(name), args); val = String.valueOf(obj_val); if (!isEmpty(val)) { return val; @@ -170,16 +179,13 @@ public String dequeue(JedisCluster jedis, int timeout_s) throws InterruptedExcep * @note Suggested return identifier: val. */ @Override - public String nonBlockingDequeue(JedisCluster jedis) throws InterruptedException { - List args = Arrays.asList(name, getDequeueName()); - Object obj_val = jedis.eval(script, keys, args); + public String poll() { + List args = ImmutableList.of(name, getDequeueName()); + Object obj_val = jedis.eval(script, ImmutableList.of(name), args); String val = String.valueOf(obj_val); if (!isEmpty(val)) { return val; } - if (Thread.currentThread().isInterrupted()) { - throw new InterruptedException(); - } return null; } @@ -189,7 +195,6 @@ public String nonBlockingDequeue(JedisCluster jedis) throws InterruptedException * @return The name of the queue. * @note Suggested return identifier: name. */ - @Override public String getName() { return name; } @@ -211,8 +216,7 @@ public String getDequeueName() { * @return The current length of the queue. * @note Suggested return identifier: length. 
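The timestamp prefix added in offer() exploits sorted-set tie-breaking: members with equal scores are ordered lexicographically, so prefixing each member with clock.millis() keeps equal-priority entries FIFO, and the Lua pop strips the prefix again with gsub. A worked illustration with fabricated timestamps:

// Two offers at priority 0, one millisecond apart:
jedis.zadd("queue", 0, "1700000000000:opA");
jedis.zadd("queue", 0, "1700000000001:opB");
// ZRANGE queue 0 0 orders by (score, then member), so opA pops first;
// lexicographic order matches numeric order while timestamps share a digit count.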
*/ - @Override - public long size(JedisCluster jedis) { + public long size() { return jedis.zcard(name); } @@ -223,8 +227,8 @@ public long size(JedisCluster jedis) { * @note Overloaded. */ @Override - public void visit(JedisCluster jedis, StringVisitor visitor) { - visit(jedis, name, visitor); + public void visit(StringVisitor visitor) { + visit(name, visitor); } /** @@ -233,7 +237,7 @@ public void visit(JedisCluster jedis, StringVisitor visitor) { * @param visitor A visitor for each visited element in the queue. */ @Override - public void visitDequeue(JedisCluster jedis, StringVisitor visitor) { + public void visitDequeue(StringVisitor visitor) { int listPageSize = 10000; int index = 0; int nextIndex = listPageSize; @@ -256,11 +260,11 @@ public void visitDequeue(JedisCluster jedis, StringVisitor visitor) { * @param visitor A visitor for each visited element in the queue. * @note Overloaded. */ - private void visit(JedisCluster jedis, String queueName, StringVisitor visitor) { + private void visit(String queueName, StringVisitor visitor) { int listPageSize = 10000; int index = 0; int nextIndex = listPageSize; - Set entries; + List entries; do { entries = jedis.zrange(queueName, index, nextIndex - 1); @@ -290,14 +294,14 @@ private String getLuaScript() { "end", "assert(not isempty(zset), 'ERR1: zset missing')", "assert(not isempty(deqName), 'ERR2: dequeue missing')", - " local pped = redis.call('ZRANGE', zset, 0, 0)", - " if next(pped) ~= nil then", - " for _,item in ipairs(pped) do", - " val = item:gsub('^%d*:', '')", - " redis.call('ZREM', zset, item)", - " redis.call('LPUSH', deqName, val)", - " end", + "local pped = redis.call('ZRANGE', zset, 0, 0)", + "if next(pped) ~= nil then", + " for _,item in ipairs(pped) do", + " val = string.gsub(item, '^%d*:', '')", + " redis.call('ZREM', zset, item)", + " redis.call('LPUSH', deqName, val)", " end", + "end", "return val"); } diff --git a/src/main/java/build/buildfarm/common/redis/RedisQueue.java b/src/main/java/build/buildfarm/common/redis/RedisQueue.java index 834a7c06c7..d6c11e7ec4 100644 --- a/src/main/java/build/buildfarm/common/redis/RedisQueue.java +++ b/src/main/java/build/buildfarm/common/redis/RedisQueue.java @@ -14,9 +14,14 @@ package build.buildfarm.common.redis; +import static redis.clients.jedis.args.ListDirection.LEFT; +import static redis.clients.jedis.args.ListDirection.RIGHT; + +import build.buildfarm.common.Queue; import build.buildfarm.common.StringVisitor; +import java.time.Duration; import java.util.List; -import redis.clients.jedis.JedisCluster; +import redis.clients.jedis.Jedis; /** * @class RedisQueue @@ -24,10 +29,22 @@ * @details A redis queue is an implementation of a queue data structure which internally uses redis * to store and distribute the data. It's important to know that the lifetime of the queue * persists before and after the queue data structure is created (since it exists in redis). - * Therefore, two redis queues with the same name, would in fact be the same underlying redis - * queue. + * Therefore, two redis queues with the same name and redis service, would in fact be the same + * underlying redis queue. 
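The script above pops atomically: ZRANGE reads the head, ZREM claims it, and LPUSH parks it on the dequeue list in one server-side step, so two workers can never claim the same element. Invoking it mirrors the eval calls in this file; jedis, script, name, and dequeueName are assumed in scope:

import com.google.common.collect.ImmutableList;

Object popped =
    jedis.eval(
        script, // the Lua shown above
        ImmutableList.of(name), // KEYS[1] routes the EVAL to name's slot
        ImmutableList.of(name, dequeueName)); // ARGV: zset and dequeue list names
String val = String.valueOf(popped); // the string "null" when nothing was available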
*/ -public class RedisQueue extends QueueInterface { +public class RedisQueue implements Queue { + private static final int defaultListPageSize = 10000; + + public static Queue decorate(Jedis jedis, String name) { + return new RedisQueue(jedis, name, defaultListPageSize); + } + + private static double toRedisTimeoutSeconds(Duration timeout) { + return timeout.getSeconds() + timeout.getNano() / 1e9; + } + + private final Jedis jedis; + /** * @field name * @brief The unique name of the queue. @@ -36,16 +53,21 @@ public class RedisQueue extends QueueInterface { */ private final String name; + private final int listPageSize; + /** * @brief Constructor. * @details Construct a named redis queue with an established redis cluster. * @param name The global name of the queue. */ - public RedisQueue(String name) { - // In order for dequeue properly, the queue needs to have a hashtag. Otherwise it will error - // with: "No way to dispatch this command to Redis Cluster because keys have different slots." - // when trying to brpoplpush. If no hashtag was given we provide a default. + public RedisQueue(Jedis jedis, String name) { + this(jedis, name, defaultListPageSize); + } + + public RedisQueue(Jedis jedis, String name, int listPageSize) { + this.jedis = jedis; this.name = name; + this.listPageSize = listPageSize; } /** @@ -53,8 +75,9 @@ public RedisQueue(String name) { * @details Adds the value into the backend redis queue. * @param val The value to push onto the queue. */ - public void push(JedisCluster jedis, String val) { - push(jedis, val, 1); + @Override + public boolean offer(String val) { + return offer(val, 1); } /** @@ -62,8 +85,10 @@ public void push(JedisCluster jedis, String val) { * @details Adds the value into the backend redis queue. * @param val The value to push onto the queue. */ - public void push(JedisCluster jedis, String val, double priority) { + @Override + public boolean offer(String val, double priority) { jedis.lpush(name, val); + return true; } /** @@ -73,7 +98,8 @@ public void push(JedisCluster jedis, String val, double priority) { * @return Whether the value was removed. * @note Suggested return identifier: wasRemoved. */ - public boolean removeFromDequeue(JedisCluster jedis, String val) { + @Override + public boolean removeFromDequeue(String val) { return jedis.lrem(getDequeueName(), -1, val) != 0; } @@ -84,7 +110,7 @@ public boolean removeFromDequeue(JedisCluster jedis, String val) { * @return Whether the value was removed. * @note Suggested return identifier: wasRemoved. */ - public boolean removeAll(JedisCluster jedis, String val) { + public boolean removeAll(String val) { return jedis.lrem(name, 0, val) != 0; } @@ -93,19 +119,14 @@ public boolean removeAll(JedisCluster jedis, String val) { * @details This pops the element from one queue atomically into an internal list called the * dequeue. It will wait until the timeout has expired. Null is returned if the timeout has * expired. - * @param timeout_s Timeout to wait if there is no item to dequeue. (units: seconds (s)) + * @param timeout Timeout to wait if there is no item to dequeue. * @return The value of the transfered element. null if the thread was interrupted. * @note Overloaded. * @note Suggested return identifier: val. 
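take() and poll() below now map one-to-one onto redis's reliable-queue primitives: LMOVE/BLMOVE atomically shift the element onto the processing (dequeue) list, superseding the deprecated RPOPLPUSH/BRPOPLPUSH, and BLMOVE accepts a fractional timeout, hence toRedisTimeoutSeconds. A consumer-side sketch, with process() as a placeholder:

import static redis.clients.jedis.args.ListDirection.LEFT;
import static redis.clients.jedis.args.ListDirection.RIGHT;

// block up to 2.5s; on success the element is parked on the dequeue list
String val = jedis.blmove("queue", "queue_dequeue", RIGHT, LEFT, 2.5);
if (val != null) {
  process(val);
  jedis.lrem("queue_dequeue", -1, val); // ack, i.e. removeFromDequeue
}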
*/ - public String dequeue(JedisCluster jedis, int timeout_s) throws InterruptedException { - for (int i = 0; i < timeout_s; ++i) { - String val = jedis.brpoplpush(name, getDequeueName(), 1); - if (val != null) { - return val; - } - } - return null; + @Override + public String take(Duration timeout) { + return jedis.blmove(name, getDequeueName(), RIGHT, LEFT, toRedisTimeoutSeconds(timeout)); } /** @@ -115,25 +136,9 @@ public String dequeue(JedisCluster jedis, int timeout_s) throws InterruptedExcep * @return The value of the transfered element. null if nothing was dequeued. * @note Suggested return identifier: val. */ - public String nonBlockingDequeue(JedisCluster jedis) throws InterruptedException { - String val = jedis.rpoplpush(name, getDequeueName()); - if (val != null) { - return val; - } - if (Thread.currentThread().isInterrupted()) { - throw new InterruptedException(); - } - return null; - } - - /** - * @brief Get name. - * @details Get the name of the queue. this is the redis key used for the list. - * @return The name of the queue. - * @note Suggested return identifier: name. - */ - public String getName() { - return name; + @Override + public String poll() { + return jedis.lmove(name, getDequeueName(), RIGHT, LEFT); } /** @@ -153,7 +158,7 @@ public String getDequeueName() { * @return The current length of the queue. * @note Suggested return identifier: length. */ - public long size(JedisCluster jedis) { + public long size() { return jedis.llen(name); } @@ -163,8 +168,8 @@ public long size(JedisCluster jedis) { * @param visitor A visitor for each visited element in the queue. * @note Overloaded. */ - public void visit(JedisCluster jedis, StringVisitor visitor) { - visit(jedis, name, visitor); + public void visit(StringVisitor visitor) { + visit(name, visitor); } /** @@ -172,8 +177,8 @@ public void visit(JedisCluster jedis, StringVisitor visitor) { * @details Enacts a visitor over each element in the dequeue. * @param visitor A visitor for each visited element in the queue. */ - public void visitDequeue(JedisCluster jedis, StringVisitor visitor) { - visit(jedis, getDequeueName(), visitor); + public void visitDequeue(StringVisitor visitor) { + visit(getDequeueName(), visitor); } /** @@ -183,9 +188,7 @@ public void visitDequeue(JedisCluster jedis, StringVisitor visitor) { * @param visitor A visitor for each visited element in the queue. * @note Overloaded. */ - private void visit(JedisCluster jedis, String queueName, StringVisitor visitor) { - int listPageSize = 10000; - + private void visit(String queueName, StringVisitor visitor) { int index = 0; int nextIndex = listPageSize; List entries; diff --git a/src/main/java/build/buildfarm/common/redis/RedisQueueFactory.java b/src/main/java/build/buildfarm/common/redis/RedisQueueFactory.java deleted file mode 100644 index 4b569f0d3c..0000000000 --- a/src/main/java/build/buildfarm/common/redis/RedisQueueFactory.java +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2020-2022 The Bazel Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
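With the jedis handle now captured at construction, callers no longer thread a cluster client through every call. A minimal usage sketch of the reworked API, assuming a reachable local redis and the buildfarm classes on the classpath (the "{hash}example" name is only an illustration of a hashtagged key):

import build.buildfarm.common.redis.RedisQueue;
import java.time.Duration;
import redis.clients.jedis.Jedis;

public class RedisQueueDemo {
  public static void main(String[] args) {
    try (Jedis jedis = new Jedis("localhost", 6379)) {
      RedisQueue queue = new RedisQueue(jedis, "{hash}example");
      queue.offer("value");
      // blocks up to one second; blmove appears to return null on timeout
      String val = queue.take(Duration.ofSeconds(1));
      if (val != null) {
        // acknowledge completion by clearing the dequeue entry
        queue.removeFromDequeue(val);
      }
    }
  }
}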
diff --git a/src/main/java/build/buildfarm/common/redis/RedisQueueFactory.java b/src/main/java/build/buildfarm/common/redis/RedisQueueFactory.java
deleted file mode 100644
index 4b569f0d3c..0000000000
--- a/src/main/java/build/buildfarm/common/redis/RedisQueueFactory.java
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2020-2022 The Bazel Authors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//    http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package build.buildfarm.common.redis;
-
-import build.buildfarm.common.config.BuildfarmConfigs;
-import build.buildfarm.common.config.Queue;
-
-/**
- * @class RedisQueueFactory
- * @brief A redis queue factory.
- */
-public class RedisQueueFactory {
-  private static BuildfarmConfigs configs = BuildfarmConfigs.getInstance();
-
-  public QueueInterface getQueue(Queue.QUEUE_TYPE queueType, String name) {
-    if (queueType == null) {
-      return null;
-    }
-    if (queueType.equals(Queue.QUEUE_TYPE.standard)) {
-      return new RedisQueue(name);
-    } else if (queueType.equals(Queue.QUEUE_TYPE.priority)) {
-      return new RedisPriorityQueue(name, configs.getBackplane().getPriorityPollIntervalMillis());
-    }
-    return null;
-  }
-}
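The deleted factory's role is taken over by the static decorate methods seen above (and used via QueueDecorator later in this patch). A hedged sketch of the replacement pattern; RedisQueue.decorate is shown in this patch, and RedisPriorityQueue.decorate is assumed to have the same (Jedis, String) shape based on the method references used below in DistributedStateCreator:

import build.buildfarm.common.Queue;
import build.buildfarm.common.redis.RedisPriorityQueue;
import build.buildfarm.common.redis.RedisQueue;
import redis.clients.jedis.Jedis;

public class QueueSelectionDemo {
  // stands in for the removed RedisQueueFactory.getQueue: choose an
  // implementation up front and bind it to the jedis handle via decorate
  static Queue getQueue(Jedis jedis, boolean priority, String name) {
    return priority
        ? RedisPriorityQueue.decorate(jedis, name)
        : RedisQueue.decorate(jedis, name);
  }
}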
diff --git a/src/main/java/build/buildfarm/common/redis/RedisSlotToHash.java b/src/main/java/build/buildfarm/common/redis/RedisSlotToHash.java
index e54d7629e2..4feba7c30b 100644
--- a/src/main/java/build/buildfarm/common/redis/RedisSlotToHash.java
+++ b/src/main/java/build/buildfarm/common/redis/RedisSlotToHash.java
@@ -14,7 +14,11 @@
 package build.buildfarm.common.redis;
 
-import static redis.clients.jedis.JedisCluster.HASHSLOTS;
+import static com.google.common.collect.Iterables.all;
+import static com.google.common.collect.Iterables.any;
+import static com.google.common.collect.Iterables.transform;
+import static com.google.common.collect.Ordering.natural;
+import static redis.clients.jedis.Protocol.CLUSTER_HASHSLOTS;
 
 import com.google.common.base.Preconditions;
 import java.util.ArrayList;
@@ -34,6 +38,8 @@
  *     return that slot number.
  */
 public class RedisSlotToHash {
+  private static final List<String> LOOKUP_TABLE = createLookupTable();
+
   /**
    * @brief Convert slot number into string that hashes to slot.
    * @details A short alphanumeric string will be given which when hashed by redis's crc16 algorithm
@@ -43,7 +49,7 @@ public class RedisSlotToHash {
    * @note Suggested return identifier: hashtag.
    */
   public static String correlate(long slotNumber) {
-    Preconditions.checkState(slotNumber >= 0 && slotNumber < HASHSLOTS);
+    Preconditions.checkState(slotNumber >= 0 && slotNumber < CLUSTER_HASHSLOTS);
     return staticLookup(slotNumber);
   }
 
@@ -58,7 +64,7 @@ public static String correlate(long slotNumber) {
    * @note Suggested return identifier: hashtag.
    */
   public static String correlateRange(long start, long end) {
-    Preconditions.checkState(start >= 0 && end < HASHSLOTS);
+    Preconditions.checkState(start >= 0 && end < CLUSTER_HASHSLOTS);
 
     long hashNumber = 0;
     int slotNumber = JedisClusterCRC16.getSlot(Long.toString(hashNumber));
@@ -74,24 +80,31 @@ public static String correlateRange(long start, long end) {
    *     range.
    * @details Dynamically generates hashtags and tests them for valid slot number. Slower than
    *     static lookup, but less code.
-   * @param start The starting slot range number to find a hashable string for.
-   * @param end The ending slot range number to find a hashable string for.
+   * @param slotRanges The valid range of slots.
    * @param prefix A string prefix to include as part of the generated hashtag.
    * @return The string value to be used in a key's hashtag.
    * @note Suggested return identifier: hashtag.
    */
-  public static String correlateRangeWithPrefix(long start, long end, String prefix) {
-    Preconditions.checkState(start >= 0 && end < HASHSLOTS);
+  public static String correlateRangesWithPrefix(Iterable<List<Long>> slotRanges, String prefix) {
+    Preconditions.checkState(
+        all(slotRanges, slotRange -> slotRange.size() == 2)
+            && natural().min(transform(slotRanges, slotRange -> slotRange.get(0))) >= 0
+            && natural().max(transform(slotRanges, slotRange -> slotRange.get(1)))
+                < CLUSTER_HASHSLOTS);
 
     long hashNumber = 0;
-    int slotNumber = JedisClusterCRC16.getSlot(createHashtag(prefix, hashNumber));
-    while (slotNumber < start || slotNumber > end) {
+    int slot = JedisClusterCRC16.getSlot(createHashtag(prefix, hashNumber));
+    while (!slotRangesContainsSlot(slotRanges, slot)) {
       hashNumber++;
-      slotNumber = JedisClusterCRC16.getSlot(createHashtag(prefix, hashNumber));
+      slot = JedisClusterCRC16.getSlot(createHashtag(prefix, hashNumber));
     }
     return createHashtag(prefix, hashNumber);
   }
 
+  private static boolean slotRangesContainsSlot(Iterable<List<Long>> slotRanges, int slot) {
+    return any(slotRanges, range -> slot >= range.get(0) && slot <= range.get(1));
+  }
+
   /**
    * @brief Create hashtag.
    * @details Combine prefix with a generated number.
@@ -112,8 +125,7 @@ private static String createHashtag(String prefix, long generated) {
    * @note Suggested return identifier: hashtag.
    */
   private static String staticLookup(long slotNumber) {
-    List<String> lookupTable = getLookupTable();
-    return lookupTable.get((int) slotNumber);
+    return LOOKUP_TABLE.get((int) slotNumber);
   }
 
   /**
@@ -124,8 +136,8 @@ private static String staticLookup(long slotNumber) {
    * @return The hashtags organized by slot index.
    * @note Suggested return identifier: lookupTable.
    */
-  private static List<String> getLookupTable() {
-    List<String> lookupTable = new ArrayList<>();
+  private static List<String> createLookupTable() {
+    List<String> lookupTable = new ArrayList<>(CLUSTER_HASHSLOTS);
     lookupTable.addAll(slots0To4999());
     lookupTable.addAll(slots5000To9999());
     lookupTable.addAll(slots10000To14999());
diff --git a/src/main/java/build/buildfarm/common/redis/Timestamp.java b/src/main/java/build/buildfarm/common/redis/Timestamp.java
deleted file mode 100644
index 5ada4bbeb5..0000000000
--- a/src/main/java/build/buildfarm/common/redis/Timestamp.java
+++ /dev/null
@@ -1,11 +0,0 @@
-package build.buildfarm.common.redis;
-
-public class Timestamp {
-  public Long getMillis() {
-    return System.currentTimeMillis();
-  };
-
-  public Long getNanos() {
-    return System.nanoTime();
-  };
-}
diff --git a/src/main/java/build/buildfarm/common/resources/BUILD b/src/main/java/build/buildfarm/common/resources/BUILD
index 0e8924c447..e4d965223f 100644
--- a/src/main/java/build/buildfarm/common/resources/BUILD
+++ b/src/main/java/build/buildfarm/common/resources/BUILD
@@ -1,5 +1,5 @@
-load("@rules_proto//proto:defs.bzl", "proto_library")
 load("@rules_java//java:defs.bzl", "java_proto_library")
+load("@rules_proto//proto:defs.bzl", "proto_library")
 
 package(default_visibility = ["//visibility:public"])
 
@@ -7,12 +7,12 @@ proto_library(
     name = "resource_proto",
     srcs = ["resource.proto"],
    deps = [
+        "@com_google_googleapis//google/api:annotations_proto",
+        "@com_google_googleapis//google/longrunning:operations_proto",
+        "@com_google_googleapis//google/rpc:status_proto",
         "@com_google_protobuf//:duration_proto",
         "@com_google_protobuf//:timestamp_proto",
-        "@googleapis//:google_api_annotations_proto",
-        "@googleapis//:google_longrunning_operations_proto",
-        "@googleapis//:google_rpc_status_proto",
-        "@remote_apis//build/bazel/remote/execution/v2:remote_execution_proto",
+        "@remoteapis//build/bazel/remote/execution/v2:remote_execution_proto",
     ],
 )
 
@@ -30,10 +30,8 @@ java_library(
     deps = [
         "//src/main/java/build/buildfarm/common/resources:resource_java_proto",
         "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto",
-        "//third_party/jedis",
-        "@googleapis//:google_longrunning_operations_java_proto",
-        "@googleapis//:google_rpc_code_java_proto",
-        "@googleapis//:google_rpc_error_details_java_proto",
+        "@com_google_googleapis//google/longrunning:longrunning_java_proto",
+        "@com_google_googleapis//google/rpc:rpc_java_proto",
         "@maven//:com_github_jnr_jnr_constants",
         "@maven//:com_github_jnr_jnr_ffi",
         "@maven//:com_github_jnr_jnr_posix",
@@ -49,6 +47,7 @@ java_library(
         "@maven//:org_apache_commons_commons_compress",
         "@maven//:org_apache_commons_commons_lang3",
         "@maven//:org_threeten_threetenbp",
-        "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto",
+        "@maven//:redis_clients_jedis",
+        "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto",
     ],
 )
diff --git a/src/main/java/build/buildfarm/common/services/BUILD b/src/main/java/build/buildfarm/common/services/BUILD
index f2f643001e..d7afcd565a 100644
--- a/src/main/java/build/buildfarm/common/services/BUILD
+++ b/src/main/java/build/buildfarm/common/services/BUILD
@@ -12,25 +12,24 @@ java_library(
         "//src/main/java/build/buildfarm/common/resources:resource_java_proto",
         "//src/main/java/build/buildfarm/instance",
         "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto",
-        "@googleapis//:google_bytestream_bytestream_java_grpc",
-        "@googleapis//:google_bytestream_bytestream_java_proto",
-        "@googleapis//:google_devtools_build_v1_build_events_java_proto",
-        "@googleapis//:google_devtools_build_v1_publish_build_event_java_proto",
-        "@googleapis//:google_rpc_code_java_proto",
+        "//third_party/remote-apis:build_bazel_remote_asset_v1_remote_asset_java_grpc",
+        "//third_party/remote-apis:build_bazel_remote_execution_v2_remote_execution_java_grpc",
+        "@com_google_googleapis//google/bytestream:bytestream_java_grpc",
+        "@com_google_googleapis//google/bytestream:bytestream_java_proto",
+        "@com_google_googleapis//google/devtools/build/v1:build_java_proto",
+        "@com_google_googleapis//google/rpc:rpc_java_proto",
         "@maven//:com_google_code_findbugs_jsr305",
         "@maven//:com_google_guava_guava",
         "@maven//:com_google_protobuf_protobuf_java",
         "@maven//:io_grpc_grpc_api",
         "@maven//:io_grpc_grpc_context",
         "@maven//:io_grpc_grpc_core",
-        "@maven//:io_grpc_grpc_netty",
         "@maven//:io_grpc_grpc_protobuf",
         "@maven//:io_grpc_grpc_services",
         "@maven//:io_grpc_grpc_stub",
         "@maven//:io_prometheus_simpleclient",
         "@maven//:org_projectlombok_lombok",
-        "@remote_apis//:build_bazel_remote_asset_v1_remote_asset_java_proto",
-        "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_grpc",
-        "@remote_apis//:build_bazel_semver_java_proto",
+        "@remoteapis//build/bazel/remote/asset/v1:remote_asset_java_proto",
+        "@remoteapis//build/bazel/semver:semver_java_proto",
     ],
 )
diff --git a/src/main/java/build/buildfarm/common/services/ByteStreamService.java b/src/main/java/build/buildfarm/common/services/ByteStreamService.java
index f457eee7fe..d2003c32e3 100644
--- a/src/main/java/build/buildfarm/common/services/ByteStreamService.java
+++ b/src/main/java/build/buildfarm/common/services/ByteStreamService.java
@@ -341,7 +341,7 @@ public void read(ReadRequest request, StreamObserver<ReadResponse> responseObser
     long offset = request.getReadOffset();
     long limit = request.getReadLimit();
     log.log(
-        Level.FINER,
+        Level.FINEST,
         format("read resource_name=%s offset=%d limit=%d", resourceName, offset, limit));
 
     try {
@@ -356,7 +356,7 @@ public void queryWriteStatus(
       QueryWriteStatusRequest request, StreamObserver<QueryWriteStatusResponse> responseObserver) {
     String resourceName = request.getResourceName();
     try {
-      log.log(Level.FINE, format("queryWriteStatus(%s)", resourceName));
+      log.log(Level.FINER, format("queryWriteStatus(%s)", resourceName));
       Write write = getWrite(resourceName);
       responseObserver.onNext(
           QueryWriteStatusResponse.newBuilder()
@@ -365,7 +365,7 @@ public void queryWriteStatus(
               .build());
       responseObserver.onCompleted();
       log.log(
-          Level.FINE,
+          Level.FINER,
          format(
              "queryWriteStatus(%s) => committed_size = %d, complete = %s",
              resourceName, write.getCommittedSize(), write.isComplete()));
diff --git a/src/main/java/build/buildfarm/common/services/ContentAddressableStorageService.java b/src/main/java/build/buildfarm/common/services/ContentAddressableStorageService.java
index 9395d63fea..6e00e39f67 100644
--- a/src/main/java/build/buildfarm/common/services/ContentAddressableStorageService.java
+++ b/src/main/java/build/buildfarm/common/services/ContentAddressableStorageService.java
@@ -109,7 +109,7 @@ public void onSuccess(FindMissingBlobsResponse.Builder builder) {
             long elapsedMicros = stopwatch.elapsed(MICROSECONDS);
             missingBlobs.observe(request.getBlobDigestsList().size());
             log.log(
-                Level.FINE,
+                Level.FINER,
                 "FindMissingBlobs("
                     + instance.getName()
                    + ") for "
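These hunks demote per-request logging one level (FINE to FINER, FINER to FINEST), so routine traffic no longer floods FINE logs. To see the demoted messages again, both the logger and its handler must opt in; a minimal java.util.logging sketch (the logger name is illustrative):

import java.util.logging.ConsoleHandler;
import java.util.logging.Level;
import java.util.logging.Logger;

public class LogLevelDemo {
  public static void main(String[] args) {
    Logger log = Logger.getLogger("build.buildfarm.common.services");
    ConsoleHandler handler = new ConsoleHandler();
    handler.setLevel(Level.FINEST); // handler must also pass FINEST records
    log.addHandler(handler);
    log.setLevel(Level.FINEST);
    log.finest("per-request read/write tracing now visible");
  }
}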
diff --git a/src/main/java/build/buildfarm/common/services/WriteStreamObserver.java b/src/main/java/build/buildfarm/common/services/WriteStreamObserver.java
index 4e0d28d68b..342fb36bad 100644
--- a/src/main/java/build/buildfarm/common/services/WriteStreamObserver.java
+++ b/src/main/java/build/buildfarm/common/services/WriteStreamObserver.java
@@ -72,6 +72,8 @@ public class WriteStreamObserver implements StreamObserver<WriteRequest> {
   private boolean initialized = false;
   private volatile boolean committed = false;
   private String name = null;
+
+  @GuardedBy("this")
   private Write write = null;
 
   @GuardedBy("this")
@@ -111,7 +113,7 @@ public synchronized void onNext(WriteRequest request) {
       Status status = Status.fromThrowable(e);
       if (errorResponse(status.asException())) {
         log.log(
-            status.getCode() == Status.Code.CANCELLED ? Level.FINE : Level.SEVERE,
+            status.getCode() == Status.Code.CANCELLED ? Level.FINER : Level.SEVERE,
             format("error writing %s", (name == null ? request.getResourceName() : name)),
             e);
       }
@@ -156,7 +158,7 @@ synchronized void commitSynchronized(long committedSize) {
 
     if (Context.current().isCancelled()) {
       log.log(
-          Level.FINER,
+          Level.FINEST,
          format("skipped delivering committed_size to %s for cancelled context", name));
     } else {
       try {
@@ -174,19 +176,11 @@ synchronized void commitSynchronized(long committedSize) {
         }
         commitActive(committedSize);
       } catch (RuntimeException e) {
-        RequestMetadata requestMetadata = TracingMetadataUtils.fromCurrentContext();
         Status status = Status.fromThrowable(e);
         if (errorResponse(status.asException())) {
-          log.log(
-              status.getCode() == Status.Code.CANCELLED ? Level.FINE : Level.SEVERE,
-              format(
-                  "%s-%s: %s -> %s -> %s: error committing %s",
-                  requestMetadata.getToolDetails().getToolName(),
-                  requestMetadata.getToolDetails().getToolVersion(),
-                  requestMetadata.getCorrelatedInvocationsId(),
-                  requestMetadata.getToolInvocationId(),
-                  requestMetadata.getActionId(),
-                  name),
+          logWriteActivity(
+              status.getCode() == Status.Code.CANCELLED ? Level.FINER : Level.SEVERE,
+              "committing",
              e);
         }
       }
@@ -198,7 +192,8 @@ void commitActive(long committedSize) {
 
     if (exception.compareAndSet(null, null)) {
       try {
-        log.log(Level.FINER, format("delivering committed_size for %s of %d", name, committedSize));
+        log.log(
+            Level.FINEST, format("delivering committed_size for %s of %d", name, committedSize));
         responseObserver.onNext(response);
         responseObserver.onCompleted();
       } catch (Exception e) {
@@ -218,9 +213,9 @@ private void initialize(WriteRequest request) throws InvalidResourceNameExceptio
     name = resourceName;
     try {
       write = getWrite(resourceName);
-      if (log.isLoggable(Level.FINER)) {
+      if (log.isLoggable(Level.FINEST)) {
         log.log(
-            Level.FINER,
+            Level.FINEST,
             format(
                 "registering callback for %s: committed_size = %d (transient), complete = %s",
                 resourceName, write.getCommittedSize(), write.isComplete()));
@@ -236,7 +231,9 @@ public void onSuccess(Long committedSize) {
           @SuppressWarnings("NullableProblems")
           @Override
           public void onFailure(Throwable t) {
-            errorResponse(t);
+            if (errorResponse(t)) {
+              logWriteActivity("completing", t);
+            }
           }
         },
         withCancellation.fixedContextExecutor(directExecutor()));
@@ -254,6 +251,26 @@ public void onFailure(Throwable t) {
     }
   }
 
+  private void logWriteActivity(String activity, Throwable t) {
+    logWriteActivity(Level.SEVERE, activity, t);
+  }
+
+  private void logWriteActivity(Level level, String activity, Throwable t) {
+    RequestMetadata requestMetadata = TracingMetadataUtils.fromCurrentContext();
+    log.log(
+        level,
+        format(
+            "%s-%s: %s -> %s -> %s: error %s %s",
+            requestMetadata.getToolDetails().getToolName(),
+            requestMetadata.getToolDetails().getToolVersion(),
+            requestMetadata.getCorrelatedInvocationsId(),
+            requestMetadata.getToolInvocationId(),
+            requestMetadata.getActionId(),
+            activity,
+            name),
+        t);
+  }
+
   private void logWriteRequest(WriteRequest request, Exception e) {
     log.log(
         Level.WARNING,
@@ -267,7 +284,8 @@ private void logWriteRequest(WriteRequest request, Exception e) {
 
   private boolean errorResponse(Throwable t) {
     if (exception.compareAndSet(null, t)) {
-      if (Status.fromThrowable(t).getCode() == Status.Code.CANCELLED) {
+      if (Status.fromThrowable(t).getCode() == Status.Code.CANCELLED
+          || Context.current().isCancelled()) {
         return false;
       }
       boolean isEntryLimitException = t instanceof EntryLimitException;
@@ -287,6 +305,13 @@ private boolean errorResponse(Throwable t) {
             requestMetadata.getToolInvocationId(),
             requestMetadata.getActionId(),
             name));
+      } else {
+        log.log(
+            Level.WARNING,
+            format(
+                "error %s after %d requests and %d bytes at offset %d",
+                name, requestCount, requestBytes, earliestOffset),
+            t);
       }
       return true;
     }
@@ -332,9 +357,14 @@ private void handleWrite(String resourceName, long offset, ByteString data, bool
       throws EntryLimitException {
     long committedSize;
     try {
+      if (offset == 0) {
+        write.reset();
+      }
       committedSize = getCommittedSizeForWrite();
     } catch (IOException e) {
-      errorResponse(e);
+      if (errorResponse(e)) {
+        logWriteActivity("querying", e);
+      }
       return;
     }
     if (offset != 0 && offset > committedSize) {
@@ -358,10 +388,6 @@ private void handleWrite(String resourceName, long offset, ByteString data, bool
                   resourceName, name))
               .asException());
     } else {
-      if (offset == 0 && offset != committedSize) {
-        write.reset();
-        committedSize = 0;
-      }
       if (earliestOffset < 0 || offset < earliestOffset) {
         earliestOffset = offset;
       }
@@ -380,7 +406,7 @@ private void handleWrite(String resourceName, long offset, ByteString data, bool
         data = data.substring(skipBytes);
       }
       log.log(
-          Level.FINER,
+          Level.FINEST,
          format(
              "writing %d to %s at %d%s",
              bytesToWrite, name, offset, finishWrite ? " with finish_write" : ""));
@@ -396,7 +422,7 @@ private void handleWrite(String resourceName, long offset, ByteString data, bool
 
   @GuardedBy("this")
   private void close() {
-    log.log(Level.FINER, format("closing stream due to finishWrite for %s", name));
+    log.log(Level.FINEST, format("closing stream due to finishWrite for %s", name));
     try {
       getOutput().close();
     } catch (DigestMismatchException e) {
@@ -484,11 +510,18 @@ private FeedbackOutputStream getOutput() throws IOException {
 
   @Override
   public void onError(Throwable t) {
-    log.log(Level.FINE, format("write error for %s", name), t);
+    log.log(Level.FINER, format("write error for %s", name), t);
   }
 
   @Override
-  public void onCompleted() {
-    log.log(Level.FINE, format("write completed for %s", name));
+  public synchronized void onCompleted() {
+    log.log(Level.FINER, format("write completed for %s", name));
+    if (write == null) {
+      // we must return with a response lest we emit a grpc warning
+      // there can be no meaningful response at this point, as we
+      // have no idea what the size was
+      responseObserver.onNext(WriteResponse.getDefaultInstance());
+      responseObserver.onCompleted();
+    }
   }
 }
diff --git a/src/main/java/build/buildfarm/instance/BUILD b/src/main/java/build/buildfarm/instance/BUILD
index 5bb0451e96..ae290a6ea3 100644
--- a/src/main/java/build/buildfarm/instance/BUILD
+++ b/src/main/java/build/buildfarm/instance/BUILD
@@ -5,7 +5,7 @@ java_library(
     deps = [
         "//src/main/java/build/buildfarm/common",
         "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto",
-        "@googleapis//:google_longrunning_operations_java_proto",
+        "@com_google_googleapis//google/longrunning:longrunning_java_proto",
         "@maven//:com_google_code_findbugs_jsr305",
         "@maven//:com_google_guava_failureaccess",
         "@maven//:com_google_guava_guava",
@@ -15,6 +15,6 @@ java_library(
         "@maven//:io_grpc_grpc_core",
         "@maven//:io_grpc_grpc_protobuf",
         "@maven//:io_grpc_grpc_stub",
-        "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto",
+        "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto",
     ],
 )
diff --git a/src/main/java/build/buildfarm/instance/Instance.java b/src/main/java/build/buildfarm/instance/Instance.java
index 91ae80a9b8..7844575801 100644
--- a/src/main/java/build/buildfarm/instance/Instance.java
+++ b/src/main/java/build/buildfarm/instance/Instance.java
@@ -132,6 +132,7 @@ ListenableFuture<Void> execute(
   boolean putAndValidateOperation(Operation operation) throws InterruptedException;
 
   boolean pollOperation(String operationName, ExecutionStage.Value stage);
 
+  // returns nextPageToken suitable for list restart
   String listOperations(
       int pageSize, String pageToken, String filter, ImmutableList.Builder<Operation> operations);
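The new comment documents that listOperations hands back a continuation token. A hedged paging sketch built only on the signature shown above; the convention that an empty token terminates the listing is an assumption, not something this patch states:

import build.buildfarm.instance.Instance;
import com.google.common.collect.ImmutableList;
import com.google.longrunning.Operation;

public class ListOperationsDemo {
  // assumes an empty nextPageToken signals the end of the listing
  static void listAll(Instance instance) {
    String pageToken = "";
    do {
      ImmutableList.Builder<Operation> operations = ImmutableList.builder();
      pageToken = instance.listOperations(100, pageToken, "", operations);
      for (Operation operation : operations.build()) {
        System.out.println(operation.getName());
      }
    } while (!pageToken.isEmpty());
  }
}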
diff --git a/src/main/java/build/buildfarm/instance/MatchListener.java b/src/main/java/build/buildfarm/instance/MatchListener.java
index 46f6dfc000..3da7df90eb 100644
--- a/src/main/java/build/buildfarm/instance/MatchListener.java
+++ b/src/main/java/build/buildfarm/instance/MatchListener.java
@@ -27,7 +27,4 @@ public interface MatchListener {
   boolean onEntry(@Nullable QueueEntry queueEntry) throws InterruptedException;
 
   void onError(Throwable t);
-
-  // method that should be called when this match is cancelled and no longer valid
-  void setOnCancelHandler(Runnable onCancelHandler);
 }
diff --git a/src/main/java/build/buildfarm/instance/Utils.java b/src/main/java/build/buildfarm/instance/Utils.java
index cb5522b8cc..c25e07368c 100644
--- a/src/main/java/build/buildfarm/instance/Utils.java
+++ b/src/main/java/build/buildfarm/instance/Utils.java
@@ -47,7 +47,7 @@ public static ByteString getBlob(
       RequestMetadata requestMetadata)
       throws IOException, InterruptedException {
     return getBlob(
-        instance, compressor, blobDigest, /* offset=*/ 0, 60, TimeUnit.SECONDS, requestMetadata);
+        instance, compressor, blobDigest, /* offset= */ 0, 60, TimeUnit.SECONDS, requestMetadata);
   }
 
   public static ByteString getBlob(
diff --git a/src/main/java/build/buildfarm/instance/server/BUILD b/src/main/java/build/buildfarm/instance/server/BUILD
index 601519c867..210535a96e 100644
--- a/src/main/java/build/buildfarm/instance/server/BUILD
+++ b/src/main/java/build/buildfarm/instance/server/BUILD
@@ -1,8 +1,8 @@
 java_library(
     name = "server",
     srcs = [
-        "AbstractServerInstance.java",
         "GetDirectoryFunction.java",
+        "NodeInstance.java",
         "OperationsMap.java",
         "WatchFuture.java",
     ],
@@ -17,21 +17,18 @@ java_library(
         "//src/main/java/build/buildfarm/instance",
         "//src/main/java/build/buildfarm/operations",
         "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto",
-        "@googleapis//:google_longrunning_operations_java_proto",
-        "@googleapis//:google_rpc_code_java_proto",
-        "@googleapis//:google_rpc_error_details_java_proto",
-        "@googleapis//:google_rpc_status_java_proto",
+        "@com_google_googleapis//google/longrunning:longrunning_java_proto",
+        "@com_google_googleapis//google/rpc:rpc_java_proto",
         "@maven//:com_google_code_findbugs_jsr305",
         "@maven//:com_google_guava_guava",
         "@maven//:com_google_protobuf_protobuf_java",
         "@maven//:io_grpc_grpc_api",
         "@maven//:io_grpc_grpc_context",
         "@maven//:io_grpc_grpc_core",
-        "@maven//:io_grpc_grpc_netty",
         "@maven//:io_grpc_grpc_protobuf",
         "@maven//:io_grpc_grpc_stub",
         "@maven//:io_netty_netty_codec_http",
         "@maven//:org_projectlombok_lombok",
-        "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto",
+        "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto",
     ],
 )
diff --git a/src/main/java/build/buildfarm/instance/server/AbstractServerInstance.java b/src/main/java/build/buildfarm/instance/server/NodeInstance.java
similarity index 96%
rename from src/main/java/build/buildfarm/instance/server/AbstractServerInstance.java
rename to src/main/java/build/buildfarm/instance/server/NodeInstance.java
index 907e42aee7..f182eb6bbb 100644
--- a/src/main/java/build/buildfarm/instance/server/AbstractServerInstance.java
+++ b/src/main/java/build/buildfarm/instance/server/NodeInstance.java
@@ -16,6 +16,7 @@
 import static build.buildfarm.common.Actions.asExecutionStatus;
 import static build.buildfarm.common.Actions.checkPreconditionFailure;
+import static build.buildfarm.common.Errors.MISSING_INPUT;
 import static build.buildfarm.common.Errors.VIOLATION_TYPE_INVALID;
 import static build.buildfarm.common.Errors.VIOLATION_TYPE_MISSING;
 import static build.buildfarm.common.Trees.enumerateTreeFileDigests;
@@ -144,7 +145,7 @@
 import lombok.extern.java.Log;
 
 @Log
-public abstract class AbstractServerInstance implements Instance {
+public abstract class NodeInstance implements Instance {
   private final String name;
   protected final ContentAddressableStorage contentAddressableStorage;
   protected final ActionCache actionCache;
@@ -178,8 +179,7 @@ public abstract class AbstractServerInstance implements Instance {
   public static final String ENVIRONMENT_VARIABLES_NOT_SORTED =
       "The `Command`'s `environment_variables` are not correctly sorted by `name`.";
 
-  public static final String MISSING_INPUT =
-      "A requested input (or the `Action` or its `Command`) was not found in the CAS.";
+  public static final String SYMLINK_TARGET_ABSOLUTE = "A symlink target is absolute.";
 
   public static final String MISSING_ACTION = "The action was not found in the CAS.";
@@ -230,7 +230,7 @@ public abstract class AbstractServerInstance implements Instance {
   public static final String NO_REQUEUE_COMPLETE_MESSAGE =
       "Operation %s not requeued. Operation has already completed.";
 
-  public AbstractServerInstance(
+  public NodeInstance(
       String name,
       DigestUtil digestUtil,
       ContentAddressableStorage contentAddressableStorage,
@@ -425,7 +425,7 @@ public ListenableFuture<List<ByteString>> getAllBlobsFuture(Iterable<Digest> diges
   }
 
   protected ByteString getBlob(Digest blobDigest) throws InterruptedException {
-    return getBlob(blobDigest, /* count=*/ blobDigest.getSizeBytes());
+    return getBlob(blobDigest, /* count= */ blobDigest.getSizeBytes());
   }
 
   ByteString getBlob(Digest blobDigest, long count) throws IndexOutOfBoundsException {
@@ -453,7 +453,7 @@ ByteString getBlob(Digest blobDigest, long count) throws IndexOutOfBoundsExcepti
   protected ListenableFuture<ByteString> getBlobFuture(
       Compressor.Value compressor, Digest blobDigest, RequestMetadata requestMetadata) {
     return getBlobFuture(
-        compressor, blobDigest, /* count=*/ blobDigest.getSizeBytes(), requestMetadata);
+        compressor, blobDigest, /* count= */ blobDigest.getSizeBytes(), requestMetadata);
   }
 
   protected ListenableFuture<ByteString> getBlobFuture(
@@ -462,7 +462,7 @@ protected ListenableFuture<ByteString> getBlobFuture(
     getBlob(
         compressor,
         blobDigest,
-        /* offset=*/ 0,
+        /* offset= */ 0,
         count,
         new ServerCallStreamObserver<ByteString>() {
           ByteString content = ByteString.EMPTY;
@@ -751,7 +751,8 @@ private static void enumerateActionInputDirectory(
       Directory directory,
       Map<Digest, Directory> directoriesIndex,
       Consumer<String> onInputFile,
-      Consumer<String> onInputDirectory) {
+      Consumer<String> onInputDirectory,
+      PreconditionFailure.Builder preconditionFailure) {
     Stack<DirectoryNode> directoriesStack = new Stack<>();
     directoriesStack.addAll(directory.getDirectoriesList());
 
@@ -763,15 +764,29 @@ private static void enumerateActionInputDirectory(
           directoryPath.isEmpty() ? directoryName : (directoryPath + "/" + directoryName);
       onInputDirectory.accept(subDirectoryPath);
-      for (FileNode fileNode : directoriesIndex.get(directoryDigest).getFilesList()) {
-        String fileName = fileNode.getName();
-        String filePath = subDirectoryPath + "/" + fileName;
-        onInputFile.accept(filePath);
+      Directory subDirectory;
+      if (directoryDigest.getSizeBytes() == 0) {
+        subDirectory = Directory.getDefaultInstance();
+      } else {
+        subDirectory = directoriesIndex.get(directoryDigest);
       }
-      for (DirectoryNode subDirectoryNode :
-          directoriesIndex.get(directoryDigest).getDirectoriesList()) {
-        directoriesStack.push(subDirectoryNode);
+      if (subDirectory == null) {
+        preconditionFailure
+            .addViolationsBuilder()
+            .setType(VIOLATION_TYPE_MISSING)
+            .setSubject("blobs/" + DigestUtil.toString(directoryDigest))
+            .setDescription("The directory `/" + subDirectoryPath + "` was not found in the CAS.");
+      } else {
+        for (FileNode fileNode : subDirectory.getFilesList()) {
+          String fileName = fileNode.getName();
+          String filePath = subDirectoryPath + "/" + fileName;
+          onInputFile.accept(filePath);
+        }
+
+        for (DirectoryNode subDirectoryNode : subDirectory.getDirectoriesList()) {
+          directoriesStack.push(subDirectoryNode);
+        }
       }
     }
   }
@@ -789,6 +804,7 @@ public static void validateActionInputDirectory(
       Stack<Digest> pathDigests,
       Set<Digest> visited,
       Map<Digest, Directory> directoriesIndex,
+      boolean allowSymlinkTargetAbsolute,
       Consumer<String> onInputFile,
       Consumer<String> onInputDirectory,
       Consumer<Digest> onInputDigest,
@@ -837,6 +853,14 @@ public static void validateActionInputDirectory(
               .setSubject("/" + directoryPath + ": " + lastSymlinkName + " > " + symlinkName)
               .setDescription(DIRECTORY_NOT_SORTED);
         }
+        String symlinkTarget = symlinkNode.getTarget();
+        if (!allowSymlinkTargetAbsolute && symlinkTarget.charAt(0) == '/') {
+          preconditionFailure
+              .addViolationsBuilder()
+              .setType(VIOLATION_TYPE_INVALID)
+              .setSubject("/" + directoryPath + ": " + symlinkName + " -> " + symlinkTarget)
+              .setDescription(SYMLINK_TARGET_ABSOLUTE);
+        }
         /* FIXME serverside validity check? regex?
        Preconditions.checkState(
            isValidFilename(symlinkName),
@@ -893,7 +917,12 @@ public static void validateActionInputDirectory(
         subDirectory = directoriesIndex.get(directoryDigest);
       }
       enumerateActionInputDirectory(
-          subDirectoryPath, subDirectory, directoriesIndex, onInputFile, onInputDirectory);
+          subDirectoryPath,
+          subDirectory,
+          directoriesIndex,
+          onInputFile,
+          onInputDirectory,
+          preconditionFailure);
     } else {
       validateActionInputDirectoryDigest(
           subDirectoryPath,
@@ -901,6 +930,7 @@ public static void validateActionInputDirectory(
           pathDigests,
           visited,
           directoriesIndex,
+          allowSymlinkTargetAbsolute,
           onInputFile,
           onInputDirectory,
           onInputDigest,
@@ -916,6 +946,7 @@ private static void validateActionInputDirectoryDigest(
       Stack<Digest> pathDigests,
       Set<Digest> visited,
       Map<Digest, Directory> directoriesIndex,
+      boolean allowSymlinkTargetAbsolute,
       Consumer<String> onInputFile,
       Consumer<String> onInputDirectory,
       Consumer<Digest> onInputDigest,
@@ -940,13 +971,17 @@ private static void validateActionInputDirectoryDigest(
           pathDigests,
           visited,
           directoriesIndex,
+          allowSymlinkTargetAbsolute,
           onInputFile,
           onInputDirectory,
           onInputDigest,
           preconditionFailure);
     }
     pathDigests.pop();
-    visited.add(directoryDigest);
+    if (directory != null) {
+      // missing directories are not visited and will appear in violations list each time
+      visited.add(directoryDigest);
+    }
   }
 
   protected ListenableFuture<Tree> getTreeFuture(
@@ -957,7 +992,7 @@ protected ListenableFuture<Tree> getTreeFuture(
     Tree.Builder tree = Tree.newBuilder().setRootDigest(inputRoot);
 
     TokenizableIterator<DirectoryEntry> iterator =
-        createTreeIterator(reason, inputRoot, /* pageToken=*/ "");
+        createTreeIterator(reason, inputRoot, /* pageToken= */ "");
     while (iterator.hasNext()) {
       DirectoryEntry entry = iterator.next();
       Directory directory = entry.getDirectory();
@@ -1159,6 +1194,9 @@ void validateCommand(
     } else {
       Directory directory = directoriesIndex.get(inputRootDigest);
       for (String segment : workingDirectory.split("/")) {
+        if (segment.equals(".")) {
+          continue;
+        }
         Directory nextDirectory = directory;
         // linear for now
         for (DirectoryNode dirNode : directory.getDirectoriesList()) {
@@ -1191,12 +1229,16 @@ protected void validateAction(
     ImmutableSet.Builder<String> inputFilesBuilder = ImmutableSet.builder();
 
     inputDirectoriesBuilder.add(ACTION_INPUT_ROOT_DIRECTORY_PATH);
+    boolean allowSymlinkTargetAbsolute =
+        getCacheCapabilities().getSymlinkAbsolutePathStrategy()
+            == SymlinkAbsolutePathStrategy.Value.ALLOWED;
     validateActionInputDirectoryDigest(
         ACTION_INPUT_ROOT_DIRECTORY_PATH,
         action.getInputRootDigest(),
         new Stack<>(),
         new HashSet<>(),
         directoriesIndex,
+        allowSymlinkTargetAbsolute,
         inputFilesBuilder::add,
         inputDirectoriesBuilder::add,
         onInputDigest,
@@ -1832,8 +1874,7 @@ protected void errorOperation(
             .setRequestMetadata(requestMetadata)
             .build();
     putOperation(
-        operation
-            .toBuilder()
+        operation.toBuilder()
             .setDone(true)
             .setMetadata(Any.pack(completedMetadata))
             .setResponse(Any.pack(ExecuteResponse.newBuilder().setStatus(status).build()))
@@ -1861,8 +1902,7 @@ protected void expireOperation(Operation operation) throws InterruptedException
     }
     metadata = metadata.toBuilder().setStage(ExecutionStage.Value.COMPLETED).build();
     putOperation(
-        operation
-            .toBuilder()
+        operation.toBuilder()
             .setDone(true)
             .setMetadata(Any.pack(metadata))
             .setResponse(Any.pack(executeResponse))
@@ -1937,19 +1977,18 @@ public ServerCapabilities getCapabilities() {
   @Override
   public WorkerProfileMessage getWorkerProfile() {
     throw new UnsupportedOperationException(
-        "AbstractServerInstance doesn't support getWorkerProfile() method.");
+        "NodeInstance doesn't support getWorkerProfile() method.");
   }
 
   @Override
   public WorkerListMessage getWorkerList() {
-    throw new UnsupportedOperationException(
-        "AbstractServerInstance doesn't support getWorkerList() method.");
+    throw new UnsupportedOperationException("NodeInstance doesn't support getWorkerList() method.");
   }
 
   @Override
   public PrepareWorkerForGracefulShutDownRequestResults shutDownWorkerGracefully() {
     throw new UnsupportedOperationException(
-        "AbstractServerInstance doesn't support drainWorkerPipeline() method.");
+        "NodeInstance doesn't support drainWorkerPipeline() method.");
   }
 
   @Override
diff --git a/src/main/java/build/buildfarm/instance/shard/BUILD b/src/main/java/build/buildfarm/instance/shard/BUILD
index 9b11f1543e..bde47183db 100644
--- a/src/main/java/build/buildfarm/instance/shard/BUILD
+++ b/src/main/java/build/buildfarm/instance/shard/BUILD
@@ -8,6 +8,7 @@ java_library(
         "//src/main/java/build/buildfarm/backplane",
         "//src/main/java/build/buildfarm/cas",
         "//src/main/java/build/buildfarm/common",
+        "//src/main/java/build/buildfarm/common:BuildfarmExecutors",
         "//src/main/java/build/buildfarm/common/config",
         "//src/main/java/build/buildfarm/common/grpc",
         "//src/main/java/build/buildfarm/common/redis",
@@ -17,9 +18,7 @@ java_library(
         "//src/main/java/build/buildfarm/operations",
         "//src/main/java/build/buildfarm/operations/finder",
         "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto",
-        "//third_party/jedis",
-        "@googleapis//:google_rpc_code_java_proto",
-        "@googleapis//:google_rpc_error_details_java_proto",
+        "@com_google_googleapis//google/rpc:rpc_java_proto",
         "@maven//:com_github_ben_manes_caffeine_caffeine",
         "@maven//:com_google_code_findbugs_jsr305",
         "@maven//:com_google_guava_guava",
@@ -28,7 +27,6 @@ java_library(
         "@maven//:io_grpc_grpc_api",
         "@maven//:io_grpc_grpc_context",
         "@maven//:io_grpc_grpc_core",
-        "@maven//:io_grpc_grpc_netty",
         "@maven//:io_grpc_grpc_protobuf",
         "@maven//:io_grpc_grpc_stub",
         "@maven//:io_prometheus_simpleclient",
@@ -36,6 +34,7 @@ java_library(
         "@maven//:org_apache_commons_commons_pool2",
         "@maven//:org_projectlombok_lombok",
         "@maven//:org_redisson_redisson",
-        "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto",
+        "@maven//:redis_clients_jedis",
+        "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto",
     ],
 )
diff --git a/src/main/java/build/buildfarm/instance/shard/CasWorkerMap.java b/src/main/java/build/buildfarm/instance/shard/CasWorkerMap.java
index 4e6f7bb617..55b2933e42 100644
--- a/src/main/java/build/buildfarm/instance/shard/CasWorkerMap.java
+++ b/src/main/java/build/buildfarm/instance/shard/CasWorkerMap.java
@@ -94,6 +94,14 @@ void removeAll(RedisClient client, Iterable<Digest> blobDigests, String workerNa
    */
   Set<String> get(RedisClient client, Digest blobDigest) throws IOException;
 
+  /**
+   * @brief Get insert time for the digest.
+   * @param client Client used for interacting with redis when not using cacheMap.
+   * @param blobDigest The blob digest to lookup for insert time.
+   * @return insert time of the digest.
+   */
+  long insertTime(RedisClient client, Digest blobDigest) throws IOException;
+
   /**
    * @brief Get all of the key values as a map from the digests given.
    * @details If there are no workers for the digest, the key is left out of the returned map.
@@ -113,4 +121,11 @@ Map<Digest, Set<String>> getMap(RedisClient client, Iterable<Digest> blobDigests
    * @note Suggested return identifier: mapSize.
    */
   int size(RedisClient client) throws IOException;
+
+  /**
+   * @brief Set the expiry duration for the digests.
+   * @param client Client used for interacting with redis when not using cacheMap.
+   * @param blobDigests The blob digests to set the new expiry duration.
+   */
+  void setExpire(RedisClient client, Iterable<Digest> blobDigests) throws IOException;
 }
diff --git a/src/main/java/build/buildfarm/instance/shard/DistributedStateCreator.java b/src/main/java/build/buildfarm/instance/shard/DistributedStateCreator.java
index b5fdcebbea..5ed7255734 100644
--- a/src/main/java/build/buildfarm/instance/shard/DistributedStateCreator.java
+++ b/src/main/java/build/buildfarm/instance/shard/DistributedStateCreator.java
@@ -19,11 +19,14 @@
 import build.buildfarm.common.config.Queue;
 import build.buildfarm.common.redis.BalancedRedisQueue;
 import build.buildfarm.common.redis.ProvisionedRedisQueue;
+import build.buildfarm.common.redis.QueueDecorator;
 import build.buildfarm.common.redis.RedisClient;
 import build.buildfarm.common.redis.RedisHashMap;
 import build.buildfarm.common.redis.RedisHashtags;
 import build.buildfarm.common.redis.RedisMap;
 import build.buildfarm.common.redis.RedisNodeHashes;
+import build.buildfarm.common.redis.RedisPriorityQueue;
+import build.buildfarm.common.redis.RedisQueue;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.LinkedHashMultimap;
 import com.google.common.collect.SetMultimap;
@@ -90,7 +93,7 @@ private static BalancedRedisQueue createPrequeue(RedisClient client) throws IOEx
         getPreQueuedOperationsListName(),
         getQueueHashes(client, getPreQueuedOperationsListName()),
         configs.getBackplane().getMaxPreQueueDepth(),
-        getQueueType());
+        getQueueDecorator());
   }
 
   private static OperationQueue createOperationQueue(RedisClient client) throws IOException {
@@ -104,7 +107,7 @@ private static OperationQueue createOperationQueue(RedisClient client) throws IO
       ProvisionedRedisQueue provisionedQueue =
           new ProvisionedRedisQueue(
               getQueueName(queueConfig),
-              getQueueType(),
+              getQueueDecorator(),
               getQueueHashes(client, getQueueName(queueConfig)),
               toMultimap(queueConfig.getPlatform().getPropertiesList()),
               queueConfig.isAllowUnmatched());
@@ -124,7 +127,7 @@ private static OperationQueue createOperationQueue(RedisClient client) throws IO
     ProvisionedRedisQueue defaultQueue =
         new ProvisionedRedisQueue(
             getQueuedOperationsListName(),
-            getQueueType(),
+            getQueueDecorator(),
             getQueueHashes(client, getQueuedOperationsListName()),
             defaultProvisions);
     provisionedQueues.add(defaultQueue);
@@ -148,6 +151,12 @@ private static SetMultimap<String, String> toMultimap(List<Platform.Property> pr
     return set;
   }
 
+  private static QueueDecorator getQueueDecorator() {
+    return configs.getBackplane().isPriorityQueue()
+        ? RedisPriorityQueue::decorate
+        : RedisQueue::decorate;
+  }
+
   private static Queue.QUEUE_TYPE getQueueType() {
     return configs.getBackplane().isPriorityQueue()
         ? Queue.QUEUE_TYPE.priority
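getQueueDecorator passes the construction strategy around as a value, so provisioned queues can build their backing queue lazily per jedis handle. A hedged sketch of composing decorators, assuming QueueDecorator is a functional interface whose single method is decorate(Jedis, String) returning Queue (that shape is inferred from the method references above, not stated by the patch):

import build.buildfarm.common.Queue;
import build.buildfarm.common.redis.QueueDecorator;
import build.buildfarm.common.redis.RedisQueue;
import redis.clients.jedis.Jedis;

public class DecoratorDemo {
  public static void main(String[] args) {
    QueueDecorator plain = RedisQueue::decorate;
    // a wrapping decorator, e.g. to trace queue creation
    QueueDecorator logging =
        (jedis, name) -> {
          System.out.println("creating queue " + name);
          return plain.decorate(jedis, name);
        };
    try (Jedis jedis = new Jedis("localhost", 6379)) {
      Queue queue = logging.decorate(jedis, "{hash}demo");
      queue.offer("value");
    }
  }
}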
diff --git a/src/main/java/build/buildfarm/instance/shard/JedisCasWorkerMap.java b/src/main/java/build/buildfarm/instance/shard/JedisCasWorkerMap.java
index 097bc2e085..ec379b898b 100644
--- a/src/main/java/build/buildfarm/instance/shard/JedisCasWorkerMap.java
+++ b/src/main/java/build/buildfarm/instance/shard/JedisCasWorkerMap.java
@@ -20,9 +20,10 @@
 import build.buildfarm.common.redis.RedisClient;
 import com.google.common.collect.ImmutableMap;
 import java.io.IOException;
+import java.time.Instant;
 import java.util.Map;
 import java.util.Set;
-import redis.clients.jedis.JedisClusterPipeline;
+import redis.clients.jedis.PipelineBase;
 
 /**
  * @class JedisCasWorkerMap
@@ -116,13 +117,13 @@ public void addAll(RedisClient client, Iterable<Digest> blobDigests, String work
       throws IOException {
     client.run(
         jedis -> {
-          JedisClusterPipeline p = jedis.pipelined();
-          for (Digest blobDigest : blobDigests) {
-            String key = redisCasKey(blobDigest);
-            p.sadd(key, workerName);
-            p.expire(key, keyExpiration_s);
+          try (PipelineBase p = jedis.pipelined()) {
+            for (Digest blobDigest : blobDigests) {
+              String key = redisCasKey(blobDigest);
+              p.sadd(key, workerName);
+              p.expire(key, keyExpiration_s);
+            }
           }
-          p.sync();
         });
   }
 
@@ -152,12 +153,12 @@ public void removeAll(RedisClient client, Iterable<Digest> blobDigests, String w
       throws IOException {
     client.run(
         jedis -> {
-          JedisClusterPipeline p = jedis.pipelined();
-          for (Digest blobDigest : blobDigests) {
-            String key = redisCasKey(blobDigest);
-            p.srem(key, workerName);
+          try (PipelineBase p = jedis.pipelined()) {
+            for (Digest blobDigest : blobDigests) {
+              String key = redisCasKey(blobDigest);
+              p.srem(key, workerName);
+            }
           }
-          p.sync();
         });
   }
 
@@ -189,6 +190,12 @@ public Set<String> get(RedisClient client, Digest blobDigest) throws IOException
     return client.call(jedis -> jedis.smembers(key));
   }
 
+  @Override
+  public long insertTime(RedisClient client, Digest blobDigest) throws IOException {
+    String key = redisCasKey(blobDigest);
+    return Instant.now().getEpochSecond() - keyExpiration_s + client.call(jedis -> jedis.ttl(key));
+  }
+
   /**
    * @brief Get all of the key values as a map from the digests given.
    * @details If there are no workers for the digest, the key is left out of the returned map.
@@ -227,6 +234,17 @@ public int size(RedisClient client) throws IOException {
     return client.call(jedis -> ScanCount.get(jedis, name + ":*", 1000));
   }
 
+  @Override
+  public void setExpire(RedisClient client, Iterable<Digest> blobDigests) throws IOException {
+    client.run(
+        jedis -> {
+          for (Digest blobDigest : blobDigests) {
+            String key = redisCasKey(blobDigest);
+            jedis.expire(key, keyExpiration_s);
+          }
+        });
+  }
+
   /**
    * @brief Get the redis key name.
    * @details This is to be used for the direct redis implementation.
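insertTime recovers an approximate insertion timestamp from the key's remaining TTL: the key was written keyExpiration_s before it would expire, so insert time = now - expiration + remaining TTL. A worked example with hypothetical numbers (all values below are invented for illustration):

public class InsertTimeDemo {
  public static void main(String[] args) {
    long nowEpochSeconds = 1_700_000_000L; // hypothetical clock reading
    long keyExpirationSeconds = 604_800;   // hypothetical one-week expiration
    long remainingTtlSeconds = 604_000;    // as reported by TTL <key>
    // mirrors insertTime(): now - configured expiration + remaining ttl
    long insertTime = nowEpochSeconds - keyExpirationSeconds + remainingTtlSeconds;
    System.out.println(insertTime); // 1699999200, i.e. inserted ~800s ago
  }
}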
diff --git a/src/main/java/build/buildfarm/instance/shard/JedisClusterFactory.java b/src/main/java/build/buildfarm/instance/shard/JedisClusterFactory.java
index 922558b50b..14609ea129 100644
--- a/src/main/java/build/buildfarm/instance/shard/JedisClusterFactory.java
+++ b/src/main/java/build/buildfarm/instance/shard/JedisClusterFactory.java
@@ -16,6 +16,8 @@
 
 import build.buildfarm.common.config.BuildfarmConfigs;
 import com.google.common.base.Strings;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.HashSet;
@@ -23,13 +25,17 @@
 import java.util.Set;
 import java.util.function.Supplier;
 import javax.naming.ConfigurationException;
+import redis.clients.jedis.ConnectionPool;
+import redis.clients.jedis.ConnectionPoolConfig;
+import redis.clients.jedis.DefaultJedisClientConfig;
 import redis.clients.jedis.HostAndPort;
-import redis.clients.jedis.Jedis;
 import redis.clients.jedis.JedisCluster;
-import redis.clients.jedis.JedisPool;
-import redis.clients.jedis.JedisPoolConfig;
-import redis.clients.jedis.ScanParams;
-import redis.clients.jedis.ScanResult;
+import redis.clients.jedis.JedisPooled;
+import redis.clients.jedis.UnifiedJedis;
+import redis.clients.jedis.exceptions.JedisClusterOperationException;
+import redis.clients.jedis.params.ScanParams;
+import redis.clients.jedis.resps.ScanResult;
+import redis.clients.jedis.util.JedisURIHelper;
 
 /**
  * @class JedisClusterFactory
@@ -43,33 +49,32 @@ public class JedisClusterFactory {
    * @brief Create a jedis cluster instance.
    * @details Use proto configuration to connect to a redis cluster server and provide a jedis
    *     client.
-   * @param config Configuration for connecting to a redis cluster server.
+   * @param identifier Redis Client name.
    * @return An established jedis client used to operate on the redis cluster.
    * @note Suggested return identifier: jedis.
+   * @link Redis Client name
    */
-  public static Supplier<JedisCluster> create() throws ConfigurationException {
+  public static Supplier<UnifiedJedis> create(String identifier) throws ConfigurationException {
     // null password is required to elicit no auth in jedis
     String[] redisNodes = configs.getBackplane().getRedisNodes();
     if (redisNodes != null && redisNodes.length > 0) {
       return createJedisClusterFactory(
+          identifier,
           list2Set(redisNodes),
           configs.getBackplane().getTimeout(),
           configs.getBackplane().getMaxAttempts(),
-          Strings.isNullOrEmpty(configs.getBackplane().getRedisPassword())
-              ? null
-              : configs.getBackplane().getRedisPassword(),
-          createJedisPoolConfig());
+          Strings.emptyToNull(configs.getBackplane().getRedisPassword()),
+          createConnectionPoolConfig());
     }
 
     // support "" as redis password.
     return createJedisClusterFactory(
+        identifier,
         parseUri(configs.getBackplane().getRedisUri()),
         configs.getBackplane().getTimeout(),
         configs.getBackplane().getMaxAttempts(),
-        Strings.isNullOrEmpty(configs.getBackplane().getRedisPassword())
-            ? null
-            : configs.getBackplane().getRedisPassword(),
-        createJedisPoolConfig());
+        Strings.emptyToNull(configs.getBackplane().getRedisPassword()),
+        createConnectionPoolConfig());
   }
 
   /**
@@ -79,8 +84,8 @@ public static Supplier<UnifiedJedis> create() throws ConfigurationException {
    * @return An established test jedis client used to operate on a redis cluster.
    * @note Suggested return identifier: jedis.
    */
-  public static JedisCluster createTest() throws Exception {
-    JedisCluster redis = JedisClusterFactory.create().get();
+  public static UnifiedJedis createTest() throws Exception {
+    UnifiedJedis redis = JedisClusterFactory.create("test").get();
 
     // use the client to create an empty redis cluster
     // this will prevent any persistent data across test runs
@@ -96,10 +101,15 @@ public static JedisCluster createTest() throws Exception {
    * @param cluster An established jedis client to operate on a redis cluster.
    * @note Overloaded.
    */
-  private static void deleteExistingKeys(JedisCluster cluster) throws Exception {
-    for (JedisPool pool : cluster.getClusterNodes().values()) {
-      Jedis node = pool.getResource();
-      deleteExistingKeys(node);
+  private static void deleteExistingKeys(UnifiedJedis jedis) throws Exception {
+    if (jedis instanceof JedisCluster) {
+      JedisCluster cluster = (JedisCluster) jedis;
+      for (ConnectionPool pool : cluster.getClusterNodes().values()) {
+        UnifiedJedis node = new UnifiedJedis(pool.getResource());
+        deleteNodeExistingKeys(node);
+      }
+    } else {
+      deleteNodeExistingKeys(jedis);
     }
   }
 
@@ -111,7 +121,7 @@ private static void deleteExistingKeys(JedisCluster cluster) throws Exception {
    * @note Overloaded.
    */
   @SuppressWarnings({"unchecked", "rawtypes"})
-  private static void deleteExistingKeys(Jedis node) {
+  private static void deleteNodeExistingKeys(UnifiedJedis node) {
     String nextCursor = "0";
     Set<String> matchingKeys = new HashSet<>();
     ScanParams params = new ScanParams();
@@ -139,6 +149,40 @@ private static void deleteNodeExistingKeys(UnifiedJedis node) {
     }
   }
 
+  private static UnifiedJedis createJedis(
+      Set<HostAndPort> hostAndPorts,
+      int connectionTimeout,
+      int soTimeout,
+      int maxAttempts,
+      String password,
+      String identifier,
+      ConnectionPoolConfig poolConfig,
+      boolean ssl) {
+    try {
+      return new JedisCluster(
+          hostAndPorts,
+          connectionTimeout,
+          soTimeout,
+          maxAttempts,
+          password,
+          identifier,
+          poolConfig,
+          ssl);
+    } catch (JedisClusterOperationException e) {
+      // probably not a cluster
+      return new JedisPooled(
+          poolConfig,
+          Iterables.getOnlyElement(hostAndPorts),
+          DefaultJedisClientConfig.builder()
+              .connectionTimeoutMillis(connectionTimeout)
+              .socketTimeoutMillis(soTimeout)
+              .password(password)
+              .clientName(identifier)
+              .ssl(ssl)
+              .build());
+    }
+  }
+
   /**
    * @brief Create a jedis cluster instance with connection settings.
    * @details Use the URI, pool and connection information to connect to a redis cluster server and
@@ -150,16 +194,23 @@ private static void deleteNodeExistingKeys(UnifiedJedis node) {
    * @return An established jedis client used to operate on the redis cluster.
    * @note Suggested return identifier: jedis.
    */
-  private static Supplier<JedisCluster> createJedisClusterFactory(
-      URI redisUri, int timeout, int maxAttempts, String password, JedisPoolConfig poolConfig) {
+  private static Supplier<UnifiedJedis> createJedisClusterFactory(
+      String identifier,
+      URI redisUri,
+      int timeout,
+      int maxAttempts,
+      String password,
+      ConnectionPoolConfig poolConfig) {
     return () ->
-        new JedisCluster(
-            new HostAndPort(redisUri.getHost(), redisUri.getPort()),
-            /* connectionTimeout=*/ Integer.max(2000, timeout),
-            /* soTimeout=*/ Integer.max(2000, timeout),
+        createJedis(
+            ImmutableSet.of(new HostAndPort(redisUri.getHost(), redisUri.getPort())),
+            /* connectionTimeout= */ Integer.max(2000, timeout),
+            /* soTimeout= */ Integer.max(2000, timeout),
             Integer.max(5, maxAttempts),
             password,
-            poolConfig);
+            identifier,
+            poolConfig,
+            /* ssl= */ JedisURIHelper.isRedisSSLScheme(redisUri));
   }
 
   /**
@@ -173,32 +224,35 @@ private static Supplier<UnifiedJedis> createJedisClusterFactory(
    * @return An established jedis client used to operate on the redis cluster.
    * @note Suggested return identifier: jedis.
    */
-  private static Supplier<JedisCluster> createJedisClusterFactory(
+  private static Supplier<UnifiedJedis> createJedisClusterFactory(
+      String identifier,
       Set<HostAndPort> redisUrisNodes,
       int timeout,
      int maxAttempts,
       String password,
-      JedisPoolConfig poolConfig) {
     return () ->
-        new JedisCluster(
+      ConnectionPoolConfig poolConfig) {
+        createJedis(
            redisUrisNodes,
-            /* connectionTimeout=*/ Integer.max(2000, timeout),
-            /* soTimeout=*/ Integer.max(2000, timeout),
+            /* connectionTimeout= */ Integer.max(2000, timeout),
+            /* soTimeout= */ Integer.max(2000, timeout),
            Integer.max(5, maxAttempts),
            password,
-            poolConfig);
+            identifier,
+            poolConfig,
+            /* ssl= */ false);
   }
+
   /**
-   * @brief Create a jedis pool config.
+   * @brief Create a connection pool config.
    * @details Use configuration to build the appropriate jedis pool configuration.
-   * @param config Configuration for connecting to a redis cluster server.
    * @return A created jedis pool config.
    * @note Suggested return identifier: poolConfig.
    */
-  private static JedisPoolConfig createJedisPoolConfig() {
-    JedisPoolConfig jedisPoolConfig = new JedisPoolConfig();
-    jedisPoolConfig.setMaxTotal(configs.getBackplane().getJedisPoolMaxTotal());
-    return jedisPoolConfig;
+  private static ConnectionPoolConfig createConnectionPoolConfig() {
+    ConnectionPoolConfig connectionPoolConfig = new ConnectionPoolConfig();
+    connectionPoolConfig.setMaxTotal(configs.getBackplane().getJedisPoolMaxTotal());
+    return connectionPoolConfig;
   }
 
   /**
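createJedis first attempts a JedisCluster connection and falls back to a pooled standalone client on JedisClusterOperationException, so callers see a single UnifiedJedis regardless of deployment. A hedged usage sketch, assuming backplane configuration has already been loaded into BuildfarmConfigs (the client name is illustrative):

import build.buildfarm.instance.shard.JedisClusterFactory;
import redis.clients.jedis.UnifiedJedis;

public class BackplaneClientDemo {
  public static void main(String[] args) throws Exception {
    // works against either a cluster or a single redis node
    try (UnifiedJedis jedis = JedisClusterFactory.create("demo-client").get()) {
      jedis.set("{hash}key", "value");
      System.out.println(jedis.get("{hash}key"));
    }
  }
}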
diff --git a/src/main/java/build/buildfarm/instance/shard/OperationQueue.java b/src/main/java/build/buildfarm/instance/shard/OperationQueue.java
index 64f1f2befa..5cf7651b85 100644
--- a/src/main/java/build/buildfarm/instance/shard/OperationQueue.java
+++ b/src/main/java/build/buildfarm/instance/shard/OperationQueue.java
@@ -24,7 +24,8 @@
 import com.google.common.collect.SetMultimap;
 import java.util.ArrayList;
 import java.util.List;
-import redis.clients.jedis.JedisCluster;
+import java.util.stream.Collectors;
+import redis.clients.jedis.UnifiedJedis;
 
 /**
  * @class OperationQueue
@@ -48,6 +49,14 @@ public class OperationQueue {
    */
   private final List<ProvisionedRedisQueue> queues;
 
+  /**
+   * @field currentDequeueIndex
+   * @brief The current queue index to dequeue from.
+   * @details Used in a round-robin fashion to ensure an even distribution of dequeues across
+   *     matched queues.
+   */
+  private int currentDequeueIndex = 0;
+
   /**
    * @brief Constructor.
    * @details Construct the operation queue with various provisioned redis queues.
@@ -75,7 +84,7 @@ public OperationQueue(List<ProvisionedRedisQueue> queues, int maxQueueSize) {
    * @param jedis Jedis cluster client.
    * @param visitor A visitor for each visited element in the queue.
    */
-  public void visitDequeue(JedisCluster jedis, StringVisitor visitor) {
+  public void visitDequeue(UnifiedJedis jedis, StringVisitor visitor) {
     for (ProvisionedRedisQueue provisionedQueue : queues) {
       provisionedQueue.queue().visitDequeue(jedis, visitor);
     }
@@ -89,7 +98,7 @@ public void visitDequeue(JedisCluster jedis, StringVisitor visitor) {
    * @return Whether or not the value was removed.
    * @note Suggested return identifier: wasRemoved.
    */
-  public boolean removeFromDequeue(JedisCluster jedis, String val) {
+  public boolean removeFromDequeue(UnifiedJedis jedis, String val) {
     for (ProvisionedRedisQueue provisionedQueue : queues) {
       if (provisionedQueue.queue().removeFromDequeue(jedis, val)) {
         return true;
@@ -104,7 +113,7 @@ public boolean removeFromDequeue(JedisCluster jedis, String val) {
    * @param jedis Jedis cluster client.
    * @param visitor A visitor for each visited element in the queue.
    */
-  public void visit(JedisCluster jedis, StringVisitor visitor) {
+  public void visit(UnifiedJedis jedis, StringVisitor visitor) {
     for (ProvisionedRedisQueue provisionedQueue : queues) {
       provisionedQueue.queue().visit(jedis, visitor);
     }
@@ -117,7 +126,7 @@ public void visit(UnifiedJedis jedis, StringVisitor visitor) {
    * @return The current length of the queue.
    * @note Suggested return identifier: length.
    */
-  public long size(JedisCluster jedis) {
+  public long size(UnifiedJedis jedis) {
     // the accumulated size of all of the queues
     return queues.stream().mapToInt(i -> (int) i.queue().size(jedis)).sum();
   }
@@ -169,9 +178,9 @@ public String getName(List<Platform.Property> provisions) {
    * @param val The value to push onto the queue.
    */
   public void push(
-      JedisCluster jedis, List<Platform.Property> provisions, String val, int priority) {
+      UnifiedJedis jedis, List<Platform.Property> provisions, String val, int priority) {
     BalancedRedisQueue queue = chooseEligibleQueue(provisions);
-    queue.push(jedis, val, (double) priority);
+    queue.offer(jedis, val, (double) priority);
   }
 
   /**
@@ -184,10 +193,21 @@ public void push(
    * @return The value of the transfered element. null if the thread was interrupted.
    * @note Suggested return identifier: val.
    */
-  public String dequeue(JedisCluster jedis, List<Platform.Property> provisions)
+  public String dequeue(UnifiedJedis jedis, List<Platform.Property> provisions)
       throws InterruptedException {
-    BalancedRedisQueue queue = chooseEligibleQueue(provisions);
-    return queue.dequeue(jedis);
+    // Select all matched queues, and attempt dequeuing via round-robin.
+    List<BalancedRedisQueue> queues = chooseEligibleQueues(provisions);
+    // Keep iterating over matched queues until we find one that is non-empty and provides a
+    // dequeued value.
+    for (int index = roundRobinPopIndex(queues); ; index = roundRobinPopIndex(queues)) {
+      if (Thread.currentThread().isInterrupted()) {
+        throw new InterruptedException();
+      }
+      String value = queues.get(index).poll(jedis);
+      if (value != null) {
+        return value;
+      }
+    }
   }
 
   /**
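The dequeue rework polls the matched queues in rotation rather than blocking on a single one; note that it spins until a value arrives or the thread is interrupted. A small self-contained illustration of the rotation rule used by nextQueueInRoundRobin (queue names below are invented):

import java.util.List;

public class RoundRobinDemo {
  // mirrors nextQueueInRoundRobin: the last index wraps back to the first
  private static int next(int index, int size) {
    return index >= size - 1 ? 0 : index + 1;
  }

  public static void main(String[] args) {
    List<String> queues = List.of("cpu", "gpu", "default");
    int index = 0;
    for (int i = 0; i < 7; i++) {
      System.out.print(queues.get(index) + " ");
      index = next(index, queues.size());
    }
    // prints: cpu gpu default cpu gpu default cpu
  }
}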
* @note Suggested return identifier: status. */ - public QueueStatus status(JedisCluster jedis, List provisions) { + public QueueStatus status(UnifiedJedis jedis, List provisions) { BalancedRedisQueue queue = chooseEligibleQueue(provisions); return queue.status(jedis); } @@ -251,7 +271,7 @@ public boolean isEligible(List properties) { * @param jedis Jedis cluster client. * @return Whether are not a new element can be added to the queue based on its current size. */ - public boolean canQueue(JedisCluster jedis) { + public boolean canQueue(UnifiedJedis jedis) { return maxQueueSize < 0 || size(jedis) < maxQueueSize; } @@ -270,6 +290,39 @@ private BalancedRedisQueue chooseEligibleQueue(List provision } } + throwNoEligibleQueueException(provisions); + return null; + } + + /** + * @brief Choose eligible queues based on given properties. + * @details We use the platform execution properties of a queue entry to determine the appropriate + * queues. If there are no eligible queues, an exception is thrown. + * @param provisions Provisions to check that requirements are met. + * @return The chosen queues. + * @note Suggested return identifier: queues. + */ + private List chooseEligibleQueues(List provisions) { + List eligibleQueues = + queues.stream() + .filter(provisionedQueue -> provisionedQueue.isEligible(toMultimap(provisions))) + .map(provisionedQueue -> provisionedQueue.queue()) + .collect(Collectors.toList()); + + if (eligibleQueues.isEmpty()) { + throwNoEligibleQueueException(provisions); + } + + return eligibleQueues; + } + + /** + * @brief Throw an exception that explains why there are no eligible queues. + * @details This function should only be called when there were no matched queues. + * @param provisions Provisions to check that requirements are met. + * @return no return. + */ + private void throwNoEligibleQueueException(List provisions) { // At this point, we were unable to match an action to an eligible queue. // We will build an error explaining why the matching failed. This will help user's properly // configure their queue or adjust the execution_properties of their actions. @@ -280,12 +333,42 @@ private BalancedRedisQueue chooseEligibleQueue(List provision } throw new RuntimeException( - "there are no eligible queues for the provided execution requirements." - + " One solution to is to configure a provision queue with no requirements which would be eligible to all operations." - + " See https://github.com/bazelbuild/bazel-buildfarm/wiki/Shard-Platform-Operation-Queue for details. " + "There are no eligible queues for the provided execution requirements. One solution is" + + " to configure a provision queue with no requirements which would be eligible to all" + + " operations. See" + + " https://bazelbuild.github.io/bazel-buildfarm/docs/architecture/queues/" + + " for details. " + eligibilityResults); } + /** + * @brief Get the current queue index for round-robin dequeues. + * @details Adjusts the round-robin index for next call. + * @param matchedQueues The queues to round robin. + * @return The current round-robin index. + * @note Suggested return identifier: queueIndex. + */ + private int roundRobinPopIndex(List matchedQueues) { + int currentIndex = currentDequeueIndex; + currentDequeueIndex = nextQueueInRoundRobin(currentDequeueIndex, matchedQueues); + return currentIndex; + } + + /** + * @brief Get the next queue in the round robin. + * @details If we are currently on the last queue it becomes the first queue. + * @param index Current queue index.
+ * @param matchedQueues The queues to round robin. + * @return An adjusted value based on the current queue index. + * @note Suggested return identifier: adjustedCurrentQueue. + */ + private int nextQueueInRoundRobin(int index, List matchedQueues) { + if (index >= matchedQueues.size() - 1) { + return 0; + } + return index + 1; + } + /** * @brief Convert proto provisions into java multimap. * @details This conversion is done to more easily check if a key/value exists in the provisions. diff --git a/src/main/java/build/buildfarm/instance/shard/Operations.java b/src/main/java/build/buildfarm/instance/shard/Operations.java index 0088bdf038..b3c37ae25d 100644 --- a/src/main/java/build/buildfarm/instance/shard/Operations.java +++ b/src/main/java/build/buildfarm/instance/shard/Operations.java @@ -14,10 +14,11 @@ package build.buildfarm.instance.shard; +import build.buildfarm.common.config.BuildfarmConfigs; import build.buildfarm.common.redis.RedisMap; import java.util.Map; import java.util.Set; -import redis.clients.jedis.JedisCluster; +import redis.clients.jedis.UnifiedJedis; /** * @class Operations @@ -28,6 +29,8 @@ * information about the operations that ran. */ public class Operations { + private static BuildfarmConfigs configs = BuildfarmConfigs.getInstance(); + /** * @field operationIds * @brief A mapping from operationID -> operation @@ -54,7 +57,7 @@ public Operations(String name, int timeout_s) { * @note Overloaded. * @note Suggested return identifier: operation. */ - public String get(JedisCluster jedis, String operationId) { + public String get(UnifiedJedis jedis, String operationId) { return operationIds.get(jedis, operationId); } @@ -67,7 +70,7 @@ public String get(JedisCluster jedis, String operationId) { * @note Overloaded. * @note Suggested return identifier: operations. */ - public Iterable> get(JedisCluster jedis, Iterable searchIds) { + public Iterable> get(UnifiedJedis jedis, Iterable searchIds) { return operationIds.get(jedis, searchIds); } @@ -80,7 +83,7 @@ public Iterable> get(JedisCluster jedis, Iterable getByInvocationId(JedisCluster jedis, String invocationId) { + public Set getByInvocationId(UnifiedJedis jedis, String invocationId) { return jedis.smembers(invocationId); } @@ -93,12 +96,14 @@ public Set getByInvocationId(JedisCluster jedis, String invocationId) { * @param operation Json of the operation. */ public void insert( - JedisCluster jedis, String invocationId, String operationId, String operation) { + UnifiedJedis jedis, String invocationId, String operationId, String operation) { operationIds.insert(jedis, operationId, operation); // We also store a mapping from invocationID -> operationIDs // This is a common lookup that needs to be performant. - jedis.sadd(invocationId, operationId); + if (!invocationId.isEmpty() && jedis.sadd(invocationId, operationId) == 1) { + jedis.expire(invocationId, configs.getBackplane().getMaxInvocationIdTimeout()); + } } /** @@ -107,7 +112,7 @@ public void insert( * @param jedis Jedis cluster client. * @param operationId The ID of the operation.
*/ - public void remove(JedisCluster jedis, String operationId) { + public void remove(UnifiedJedis jedis, String operationId) { operationIds.remove(jedis, operationId); } } diff --git a/src/main/java/build/buildfarm/instance/shard/RedisShardBackplane.java b/src/main/java/build/buildfarm/instance/shard/RedisShardBackplane.java index db26425261..a927aad21f 100644 --- a/src/main/java/build/buildfarm/instance/shard/RedisShardBackplane.java +++ b/src/main/java/build/buildfarm/instance/shard/RedisShardBackplane.java @@ -15,6 +15,8 @@ package build.buildfarm.instance.shard; import static java.lang.String.format; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.SECONDS; import build.bazel.remote.execution.v2.ActionResult; import build.bazel.remote.execution.v2.Digest; @@ -51,11 +53,16 @@ import build.buildfarm.v1test.GetClientStartTimeRequest; import build.buildfarm.v1test.GetClientStartTimeResult; import build.buildfarm.v1test.OperationChange; +import build.buildfarm.v1test.OperationQueueStatus; import build.buildfarm.v1test.QueueEntry; +import build.buildfarm.v1test.QueueStatus; import build.buildfarm.v1test.QueuedOperationMetadata; import build.buildfarm.v1test.ShardWorker; import build.buildfarm.v1test.WorkerChange; import build.buildfarm.v1test.WorkerType; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Suppliers; +import com.google.common.base.Throwables; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ListMultimap; @@ -73,17 +80,18 @@ import com.google.rpc.Code; import com.google.rpc.PreconditionFailure; import com.google.rpc.Status; +import io.grpc.Deadline; import java.io.IOException; import java.time.Instant; import java.util.ArrayList; -import java.util.Collections; import java.util.Date; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; -import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; @@ -93,12 +101,13 @@ import javax.annotation.Nullable; import javax.naming.ConfigurationException; import lombok.extern.java.Log; -import redis.clients.jedis.JedisCluster; +import redis.clients.jedis.UnifiedJedis; @Log public class RedisShardBackplane implements Backplane { private static BuildfarmConfigs configs = BuildfarmConfigs.getInstance(); + private static final int workerSetMaxAge = 3; // seconds private static final JsonFormat.Parser operationParser = JsonFormat.parser() .usingTypeRegistry( @@ -122,16 +131,12 @@ public class RedisShardBackplane implements Backplane { .add(PreconditionFailure.getDescriptor()) .build()); - private static class ActionAmounts { - Integer build = 0; - Integer test = 0; - Integer unknown = 0; - } - private final String source; // used in operation change publication + private final boolean subscribeToBackplane; + private final boolean runFailsafeOperation; private final Function onPublish; private final Function onComplete; - private final Supplier jedisClusterFactory; + private final Supplier jedisClusterFactory; private @Nullable InterruptingRunnable onUnsubscribe = null; private Thread subscriptionThread = null; @@ -139,30 +144,55 @@ private static class ActionAmounts { private RedisShardSubscriber subscriber = null; private 
RedisShardSubscription operationSubscription = null; private ExecutorService subscriberService = null; + private ExecutorService dequeueService = null; private @Nullable RedisClient client = null; - private final Set storageWorkerSet = Collections.synchronizedSet(new HashSet<>()); - private long workerSetExpiresAt = 0; + private Deadline storageWorkersDeadline = null; + private final Map storageWorkers = new ConcurrentHashMap<>(); + private final Supplier> recentExecuteWorkers; private DistributedState state = new DistributedState(); public RedisShardBackplane( String source, + boolean subscribeToBackplane, + boolean runFailsafeOperation, Function onPublish, Function onComplete) throws ConfigurationException { - this(source, onPublish, onComplete, JedisClusterFactory.create()); + this( + source, + subscribeToBackplane, + runFailsafeOperation, + onPublish, + onComplete, + JedisClusterFactory.create(source)); } public RedisShardBackplane( String source, + boolean subscribeToBackplane, + boolean runFailsafeOperation, Function onPublish, Function onComplete, - Supplier jedisClusterFactory) { + Supplier jedisClusterFactory) { this.source = source; + this.subscribeToBackplane = subscribeToBackplane; + this.runFailsafeOperation = runFailsafeOperation; this.onPublish = onPublish; this.onComplete = onComplete; this.jedisClusterFactory = jedisClusterFactory; + recentExecuteWorkers = + Suppliers.memoizeWithExpiration( + () -> { + try { + return client.call(this::fetchAndExpireExecuteWorkers).keySet(); + } catch (IOException e) { + throw new RuntimeException(e); + } + }, + workerSetMaxAge, + SECONDS); } @SuppressWarnings("NullableProblems") @@ -194,7 +224,7 @@ public void visit(String entry) { JsonFormat.parser().merge(entry, executeEntry); visit(executeEntry.build(), entry); } catch (InvalidProtocolBufferException e) { - log.log(Level.FINE, "invalid ExecuteEntry json: " + entry, e); + log.log(Level.FINER, "invalid ExecuteEntry json: " + entry, e); } } } @@ -210,7 +240,7 @@ private Instant convertToMilliInstant(String value, String key) { return null; } - private void scanProcessing(JedisCluster jedis, Consumer onOperationName, Instant now) { + private void scanProcessing(UnifiedJedis jedis, Consumer onOperationName, Instant now) { state.prequeue.visitDequeue( jedis, new ExecuteEntryListVisitor() { @@ -218,17 +248,20 @@ private void scanProcessing(JedisCluster jedis, Consumer onOperationName protected void visit(ExecuteEntry executeEntry, String executeEntryJson) { String operationName = executeEntry.getOperationName(); String value = state.processingOperations.get(jedis, operationName); - long defaultTimeout_ms = configs.getBackplane().getProcessingTimeoutMillis(); + long processingTimeout_ms = configs.getBackplane().getProcessingTimeoutMillis(); // get the operation's expiration Instant expiresAt = convertToMilliInstant(value, operationName); // if expiration is invalid, add a valid one. 
if (expiresAt == null) { - expiresAt = now.plusMillis(defaultTimeout_ms); + expiresAt = now.plusMillis(processingTimeout_ms); String keyValue = String.format("%d", expiresAt.toEpochMilli()); - long timeout_s = Time.millisecondsToSeconds(defaultTimeout_ms); - state.processingOperations.insert(jedis, operationName, keyValue, timeout_s); + // persist the flag for an hour or for 10 times the timeout the key identifies, whichever + // is larger, so that we don't loop with the flag expired and reset an otherwise + // unaccounted-for operation + long expire_s = Math.max(3600, Time.millisecondsToSeconds(processingTimeout_ms) * 10); + state.processingOperations.insert(jedis, operationName, keyValue, expire_s); } // handle expiration @@ -243,7 +276,7 @@ protected void visit(ExecuteEntry executeEntry, String executeEntryJson) { }); } - private void scanDispatching(JedisCluster jedis, Consumer onOperationName, Instant now) { + private void scanDispatching(UnifiedJedis jedis, Consumer onOperationName, Instant now) { state.operationQueue.visitDequeue( jedis, new QueueEntryListVisitor() { @@ -251,17 +284,21 @@ private void scanDispatching(JedisCluster jedis, Consumer onOperationNam protected void visit(QueueEntry queueEntry, String queueEntryJson) { String operationName = queueEntry.getExecuteEntry().getOperationName(); String value = state.dispatchingOperations.get(jedis, operationName); - long defaultTimeout_ms = configs.getBackplane().getDispatchingTimeoutMillis(); + long dispatchingTimeout_ms = configs.getBackplane().getDispatchingTimeoutMillis(); // get the operation's expiration Instant expiresAt = convertToMilliInstant(value, operationName); // if expiration is invalid, add a valid one. if (expiresAt == null) { - expiresAt = now.plusMillis(defaultTimeout_ms); + expiresAt = now.plusMillis(dispatchingTimeout_ms); String keyValue = String.format("%d", expiresAt.toEpochMilli()); - long timeout_s = Time.millisecondsToSeconds(defaultTimeout_ms); - state.dispatchingOperations.insert(jedis, operationName, keyValue, timeout_s); + // persist the flag for an hour or for 10 times the timeout the key identifies, whichever + // is larger, so that we don't loop with the flag expired and reset an otherwise + // unaccounted-for operation + long expire_s = + Math.max(3600, Time.millisecondsToSeconds(dispatchingTimeout_ms) * 10); + state.dispatchingOperations.insert(jedis, operationName, keyValue, expire_s); } // handle expiration @@ -276,7 +313,7 @@ protected void visit(QueueEntry queueEntry, String queueEntryJson) { }); } - private void scanPrequeue(JedisCluster jedis, Consumer onOperationName) { + private void scanPrequeue(UnifiedJedis jedis, Consumer onOperationName) { state.prequeue.visit( jedis, new ExecuteEntryListVisitor() { @@ -287,7 +324,7 @@ protected void visit(ExecuteEntry executeEntry, String executeEntryJson) { }); } - private void scanQueue(JedisCluster jedis, Consumer onOperationName) { + private void scanQueue(UnifiedJedis jedis, Consumer onOperationName) { state.operationQueue.visit( jedis, new QueueEntryListVisitor() { @@ -298,13 +335,13 @@ protected void visit(QueueEntry queueEntry, String queueEntryJson) { }); } - private void scanDispatched(JedisCluster jedis, Consumer onOperationName) { + private void scanDispatched(UnifiedJedis jedis, Consumer onOperationName) { for (String operationName : state.dispatchedOperations.keys(jedis)) { onOperationName.accept(operationName); } } - private void updateWatchers(JedisCluster jedis) { + private void updateWatchers(UnifiedJedis jedis) { Instant now =
Instant.now(); Instant expiresAt = nextExpiresAt(now); Set expiringChannels = Sets.newHashSet(subscriber.expiredWatchedOperationChannels(now)); @@ -318,10 +355,10 @@ private void updateWatchers(JedisCluster jedis) { if (!expiringChannels.isEmpty()) { log.log( - Level.FINE, + Level.FINER, format("Scan %d watches, %s, expiresAt: %s", expiringChannels.size(), now, expiresAt)); - log.log(Level.FINE, "Scan prequeue"); + log.log(Level.FINER, "Scan prequeue"); // scan prequeue, pet watches scanPrequeue(jedis, resetChannel); } @@ -330,7 +367,7 @@ private void updateWatchers(JedisCluster jedis) { scanProcessing(jedis, resetChannel, now); if (!expiringChannels.isEmpty()) { - log.log(Level.FINE, "Scan queue"); + log.log(Level.FINER, "Scan queue"); // scan queue, pet watches scanQueue(jedis, resetChannel); } @@ -339,7 +376,7 @@ private void updateWatchers(JedisCluster jedis) { scanDispatching(jedis, resetChannel, now); if (!expiringChannels.isEmpty()) { - log.log(Level.FINE, "Scan dispatched"); + log.log(Level.FINER, "Scan dispatched"); // scan dispatched pet watches scanDispatched(jedis, resetChannel); } @@ -364,7 +401,7 @@ static String printOperationChange(OperationChange operationChange) } void publish( - JedisCluster jedis, + UnifiedJedis jedis, String channel, Instant effectiveAt, OperationChange.Builder operationChange) { @@ -379,7 +416,7 @@ void publish( } } - void publishReset(JedisCluster jedis, Operation operation) { + void publishReset(UnifiedJedis jedis, Operation operation) { Instant effectiveAt = Instant.now(); Instant expiresAt = nextExpiresAt(effectiveAt); publish( @@ -401,7 +438,7 @@ static Timestamp toTimestamp(Instant instant) { .build(); } - void publishExpiration(JedisCluster jedis, String channel, Instant effectiveAt) { + void publishExpiration(UnifiedJedis jedis, String channel, Instant effectiveAt) { publish( jedis, channel, @@ -411,7 +448,7 @@ void publishExpiration(JedisCluster jedis, String channel, Instant effectiveAt) } @SuppressWarnings({"unchecked", "rawtypes"}) - public void updateWatchedIfDone(JedisCluster jedis) { + public void updateWatchedIfDone(UnifiedJedis jedis) { List operationChannels = subscriber.watchedOperationChannels(); if (operationChannels.isEmpty()) { return; @@ -433,7 +470,7 @@ public void updateWatchedIfDone(JedisCluster jedis) { } subscriber.onOperation(operationChannel(operationName), operation, nextExpiresAt(now)); log.log( - Level.FINE, + Level.FINER, format( "operation %s done due to %s", operationName, operation == null ? "null" : "completed")); @@ -450,24 +487,22 @@ private void startSubscriptionThread() { Multimaps.synchronizedListMultimap( MultimapBuilder.linkedHashKeys().arrayListValues().build()); subscriberService = BuildfarmExecutors.getSubscriberPool(); + dequeueService = BuildfarmExecutors.getDequeuePool(); subscriber = new RedisShardSubscriber( - watchers, - storageWorkerSet, - configs.getBackplane().getWorkerChannel(), - subscriberService); + watchers, storageWorkers, configs.getBackplane().getWorkerChannel(), subscriberService); operationSubscription = new RedisShardSubscription( subscriber, - /* onUnsubscribe=*/ () -> { + /* onUnsubscribe= */ () -> { subscriptionThread = null; if (onUnsubscribe != null) { onUnsubscribe.runInterruptibly(); } }, - /* onReset=*/ this::updateWatchedIfDone, - /* subscriptions=*/ subscriber::subscribedChannels, + /* onReset= */ this::updateWatchedIfDone, + /* subscriptions= */ subscriber::subscribedChannels, client); // use Executors... 
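The publish, publishReset, and publishExpiration helpers above all reduce to the same pattern: print a protobuf OperationChange to JSON and push it over a Redis pub/sub channel, which RedisShardSubscriber later parses back with JsonFormat.parser(). A stripped-down sketch of that round trip on the UnifiedJedis API (publishJson is a hypothetical name, not a method in this codebase):

import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.MessageOrBuilder;
import com.google.protobuf.util.JsonFormat;
import redis.clients.jedis.UnifiedJedis;

class ChannelPublishSketch {
  // Serialize any protobuf message to JSON and fan it out over Redis pub/sub.
  // Subscribers reverse it with JsonFormat.parser().merge(json, builder).
  static void publishJson(UnifiedJedis jedis, String channel, MessageOrBuilder change)
      throws InvalidProtocolBufferException {
    jedis.publish(channel, JsonFormat.printer().print(change));
  }
}

JSON on the wire keeps the channel traffic debuggable from redis-cli at the cost of larger payloads than binary protobuf.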
@@ -483,7 +518,7 @@ private void startFailsafeOperationThread() { () -> { while (!Thread.currentThread().isInterrupted()) { try { - TimeUnit.SECONDS.sleep(10); + SECONDS.sleep(10); client.run(this::updateWatchers); } catch (InterruptedException e) { Thread.currentThread().interrupt(); @@ -503,18 +538,23 @@ public void start(String clientPublicName) throws IOException { // Construct a single redis client to be used throughout the entire backplane. // We wish to avoid various synchronous and error handling issues that could occur when using // multiple clients. - client = - new RedisClient( - jedisClusterFactory, - configs.getBackplane().getReconnectClientAttempts(), - configs.getBackplane().getReconnectClientWaitDurationMs()); + start(new RedisClient(jedisClusterFactory.get()), clientPublicName); + } + + private void start(RedisClient client, String clientPublicName) throws IOException { // Create containers that make up the backplane - state = DistributedStateCreator.create(client); + start(client, DistributedStateCreator.create(client), clientPublicName); + } - if (configs.getBackplane().isSubscribeToBackplane()) { + @VisibleForTesting + void start(RedisClient client, DistributedState state, String clientPublicName) + throws IOException { + this.client = client; + this.state = state; + if (subscribeToBackplane) { startSubscriptionThread(); } - if (configs.getBackplane().isRunFailsafeOperation()) { + if (runFailsafeOperation) { startFailsafeOperationThread(); } @@ -529,24 +569,35 @@ public synchronized void stop() throws InterruptedException { if (failsafeOperationThread != null) { failsafeOperationThread.interrupt(); failsafeOperationThread.join(); - log.log(Level.FINE, "failsafeOperationThread has been stopped"); + log.log(Level.FINER, "failsafeOperationThread has been stopped"); } if (operationSubscription != null) { operationSubscription.stop(); if (subscriptionThread != null) { subscriptionThread.join(); } - log.log(Level.FINE, "subscriptionThread has been stopped"); + log.log(Level.FINER, "subscriptionThread has been stopped"); + } + if (dequeueService != null) { + dequeueService.shutdown(); + if (dequeueService.awaitTermination(10, SECONDS)) { + log.log(Level.FINER, "dequeueService has been stopped"); + } else { + log.log(Level.WARNING, "dequeueService has not stopped"); + } } if (subscriberService != null) { subscriberService.shutdown(); - subscriberService.awaitTermination(10, TimeUnit.SECONDS); - log.log(Level.FINE, "subscriberService has been stopped"); + if (subscriberService.awaitTermination(10, SECONDS)) { + log.log(Level.FINER, "subscriberService has been stopped"); + } else { + log.log(Level.WARNING, "subscriberService has not stopped"); + } } if (client != null) { client.close(); client = null; - log.log(Level.FINE, "client has been closed"); + log.log(Level.FINER, "client has been closed"); } } @@ -572,13 +623,14 @@ public void observe(Operation operation) { @Override public void addWorker(ShardWorker shardWorker) throws IOException { String json = JsonFormat.printer().print(shardWorker); + Timestamp effectiveAt = Timestamps.fromMillis(shardWorker.getFirstRegisteredAt()); String workerChangeJson = JsonFormat.printer() .print( WorkerChange.newBuilder() .setEffectiveAt(toTimestamp(Instant.now())) .setName(shardWorker.getEndpoint()) - .setAdd(WorkerChange.Add.getDefaultInstance()) + .setAdd(WorkerChange.Add.newBuilder().setEffectiveAt(effectiveAt).build()) .build()); client.call( jedis -> { @@ -592,7 +644,7 @@ public void addWorker(ShardWorker shardWorker) throws IOException 
{ }); } - private boolean addWorkerByType(JedisCluster jedis, ShardWorker shardWorker, String json) { + private boolean addWorkerByType(UnifiedJedis jedis, ShardWorker shardWorker, String json) { int type = shardWorker.getWorkerType(); if (type == 0) { return false; // no destination @@ -607,12 +659,14 @@ private boolean addWorkerByType(JedisCluster jedis, ShardWorker shardWorker, Str return result; } - private boolean removeWorkerAndPublish(JedisCluster jedis, String name, String changeJson) { - if (state.storageWorkers.remove(jedis, name) || state.executeWorkers.remove(jedis, name)) { + private boolean removeWorkerAndPublish( + UnifiedJedis jedis, String name, String changeJson, boolean storage) { + boolean removedAny = state.executeWorkers.remove(jedis, name); + if (storage && state.storageWorkers.remove(jedis, name)) { jedis.publish(configs.getBackplane().getWorkerChannel(), changeJson); return true; } - return false; + return removedAny; } @SuppressWarnings("ConstantConditions") @@ -624,8 +678,9 @@ public boolean removeWorker(String name, String reason) throws IOException { .setRemove(WorkerChange.Remove.newBuilder().setSource(source).setReason(reason).build()) .build(); String workerChangeJson = JsonFormat.printer().print(workerChange); - return subscriber.removeWorker(name) - && client.call(jedis -> removeWorkerAndPublish(jedis, name, workerChangeJson)); + return storageWorkers.remove(name) != null + && client.call( + jedis -> removeWorkerAndPublish(jedis, name, workerChangeJson, /* storage= */ true)); } @SuppressWarnings("ConstantConditions") @@ -672,23 +727,57 @@ public void deregisterWorker(String workerName) throws IOException { removeWorker(workerName, "Requested shutdown"); } - @SuppressWarnings("ConstantConditions") + /** + * Returns a new set containing copies of the storage workers. Note: This method does not grant + * access to the shared storage set. 
+ */ @Override - public synchronized Set getStorageWorkers() throws IOException { - long now = System.currentTimeMillis(); - if (now < workerSetExpiresAt) { - return new HashSet<>(storageWorkerSet); - } + public Set getStorageWorkers() throws IOException { + refreshStorageWorkersIfExpired(); + return new HashSet<>(storageWorkers.keySet()); + } + + @Override + public Map getWorkersStartTimeInEpochSecs(Set workerNames) + throws IOException { + refreshStorageWorkersIfExpired(); + Map workerAndStartTime = new HashMap<>(); + workerNames.forEach( + worker -> { + ShardWorker workerInfo = storageWorkers.get(worker); + if (workerInfo != null) { + workerAndStartTime.put( + worker, MILLISECONDS.toSeconds(workerInfo.getFirstRegisteredAt())); + } + }); + return workerAndStartTime; + } - synchronized (storageWorkerSet) { - Set newWorkerSet = client.call(jedis -> fetchAndExpireStorageWorkers(jedis, now)); - storageWorkerSet.clear(); - storageWorkerSet.addAll(newWorkerSet); + private synchronized void refreshStorageWorkersIfExpired() throws IOException { + if (storageWorkersDeadline == null || storageWorkersDeadline.isExpired()) { + synchronized (storageWorkers) { + Map newWorkers = client.call(this::fetchAndExpireStorageWorkers); + storageWorkers.clear(); + storageWorkers.putAll(newWorkers); + } + storageWorkersDeadline = Deadline.after(workerSetMaxAge, SECONDS); } + } - // fetch every 3 seconds - workerSetExpiresAt = now + 3000; - return new HashSet<>(storageWorkerSet); + @Override + public long getDigestInsertTime(Digest blobDigest) throws IOException { + return state.casWorkerMap.insertTime(client, blobDigest); + } + + private synchronized Set getExecuteWorkers() throws IOException { + try { + return recentExecuteWorkers.get(); + } catch (RuntimeException e) { + // unwrap checked exception mask + Throwable cause = e.getCause(); + Throwables.throwIfInstanceOf(cause, IOException.class); + throw e; + } } @Override @@ -721,7 +810,8 @@ public static List randomN(List list, int n) { .collect(Collectors.toList()); } - private void removeInvalidWorkers(JedisCluster jedis, long testedAt, List workers) { + private void removeInvalidWorkers( + UnifiedJedis jedis, long testedAt, List workers, boolean storage) { if (!workers.isEmpty()) { for (ShardWorker worker : workers) { String name = worker.getEndpoint(); @@ -736,7 +826,7 @@ private void removeInvalidWorkers(JedisCluster jedis, long testedAt, List fetchAndExpireStorageWorkers(JedisCluster jedis, long now) { - Set returnWorkers = Sets.newConcurrentHashSet(); + private Map fetchAndExpireStorageWorkers(UnifiedJedis jedis) { + return fetchAndExpireWorkers(jedis, state.storageWorkers.asMap(jedis), /* storage= */ true); + } + + private Map fetchAndExpireExecuteWorkers(UnifiedJedis jedis) { + return fetchAndExpireWorkers(jedis, state.executeWorkers.asMap(jedis), /* storage= */ false); + } + + private Map fetchAndExpireWorkers( + UnifiedJedis jedis, Map workers, boolean publish) { + long now = System.currentTimeMillis(); + Map returnWorkers = Maps.newConcurrentMap(); ImmutableList.Builder invalidWorkers = ImmutableList.builder(); - for (Map.Entry entry : state.storageWorkers.asMap(jedis).entrySet()) { + for (Map.Entry entry : workers.entrySet()) { String json = entry.getValue(); String name = entry.getKey(); try { @@ -760,14 +860,14 @@ private Set fetchAndExpireStorageWorkers(JedisCluster jedis, long now) { if (worker.getExpireAt() <= now) { invalidWorkers.add(worker); } else { - returnWorkers.add(worker.getEndpoint()); + returnWorkers.put(worker.getEndpoint(), 
worker); } } } catch (InvalidProtocolBufferException e) { invalidWorkers.add(ShardWorker.newBuilder().setEndpoint(name).build()); } } - removeInvalidWorkers(jedis, now, invalidWorkers.build()); + removeInvalidWorkers(jedis, now, invalidWorkers.build(), publish); return returnWorkers; } @@ -819,7 +919,7 @@ public void putActionResult(ActionKey actionKey, ActionResult actionResult) thro configs.getBackplane().getActionCacheExpire())); } - private void removeActionResult(JedisCluster jedis, ActionKey actionKey) { + private void removeActionResult(UnifiedJedis jedis, ActionKey actionKey) { state.actionCache.remove(jedis, asDigestStr(actionKey)); } @@ -919,7 +1019,7 @@ public Iterable> getOperations(Set operationId }); } - private String getOperation(JedisCluster jedis, String operationName) { + private String getOperation(UnifiedJedis jedis, String operationName) { return state.operations.get(jedis, operationName); } @@ -973,7 +1073,7 @@ public boolean putOperation(Operation operation, ExecutionStage.Value stage) thr } private void queue( - JedisCluster jedis, + UnifiedJedis jedis, String operationName, List provisions, String queueEntryJson, @@ -1125,8 +1225,8 @@ public ImmutableList getDispatchedOperations() throws IOExc return builder.build(); } - private ExecuteEntry deprequeueOperation(JedisCluster jedis) throws InterruptedException { - String executeEntryJson = state.prequeue.dequeue(jedis); + private ExecuteEntry deprequeueOperation(UnifiedJedis jedis) throws InterruptedException { + String executeEntryJson = state.prequeue.take(jedis, dequeueService); if (executeEntryJson == null) { return null; } @@ -1163,7 +1263,7 @@ public ExecuteEntry deprequeueOperation() throws IOException, InterruptedExcepti } private @Nullable QueueEntry dispatchOperation( - JedisCluster jedis, List provisions) throws InterruptedException { + UnifiedJedis jedis, List provisions) throws InterruptedException { String queueEntryJson = state.operationQueue.dequeue(jedis, provisions); if (queueEntryJson == null) { return null; @@ -1267,7 +1367,7 @@ public boolean pollOperation(QueueEntry queueEntry, ExecutionStage.Value stage, return client.call(jedis -> pollOperation(jedis, operationName, json)); } - boolean pollOperation(JedisCluster jedis, String operationName, String dispatchedOperationJson) { + boolean pollOperation(UnifiedJedis jedis, String operationName, String dispatchedOperationJson) { if (state.dispatchedOperations.exists(jedis, operationName)) { if (!state.dispatchedOperations.insert(jedis, operationName, dispatchedOperationJson)) { return true; @@ -1290,7 +1390,7 @@ public void prequeue(ExecuteEntry executeEntry, Operation operation) throws IOEx client.run( jedis -> { state.operations.insert(jedis, invocationId, operationName, operationJson); - state.prequeue.push(jedis, executeEntryJson, priority); + state.prequeue.offer(jedis, executeEntryJson, priority); publishReset(jedis, publishOperation); }); } @@ -1326,7 +1426,7 @@ public void requeueDispatchedOperation(QueueEntry queueEntry) throws IOException }); } - private void completeOperation(JedisCluster jedis, String operationName) { + private void completeOperation(UnifiedJedis jedis, String operationName) { state.dispatchedOperations.remove(jedis, operationName); } @@ -1384,7 +1484,7 @@ public boolean isBlacklisted(RequestMetadata requestMetadata) throws IOException return client.call(jedis -> isBlacklisted(jedis, requestMetadata)); } - private boolean isBlacklisted(JedisCluster jedis, RequestMetadata requestMetadata) { + private boolean 
isBlacklisted(UnifiedJedis jedis, RequestMetadata requestMetadata) { boolean isActionBlocked = (!requestMetadata.getActionId().isEmpty() && state.blockedActions.exists(jedis, requestMetadata.getActionId())); @@ -1409,15 +1509,19 @@ public boolean canPrequeue() throws IOException { @SuppressWarnings("ConstantConditions") @Override public BackplaneStatus backplaneStatus() throws IOException { - BackplaneStatus.Builder builder = BackplaneStatus.newBuilder(); - builder.addAllActiveWorkers( - client.call( - jedis -> - Sets.union(state.executeWorkers.keys(jedis), state.storageWorkers.keys(jedis)))); - builder.setDispatchedSize(client.call(jedis -> state.dispatchedOperations.size(jedis))); - builder.setOperationQueue(state.operationQueue.status(client.call(jedis -> jedis))); - builder.setPrequeue(state.prequeue.status(client.call(jedis -> jedis))); - return builder.build(); + Set executeWorkers = getExecuteWorkers(); + Set storageWorkers = getStorageWorkers(); + OperationQueueStatus operationQueueStatus = + client.call(jedis -> state.operationQueue.status(jedis)); + QueueStatus prequeueStatus = client.call(jedis -> state.prequeue.status(jedis)); + return BackplaneStatus.newBuilder() + .addAllActiveExecuteWorkers(executeWorkers) + .addAllActiveStorageWorkers(storageWorkers) + .addAllActiveWorkers(Sets.union(executeWorkers, storageWorkers)) + .setDispatchedSize(client.call(jedis -> state.dispatchedOperations.size(jedis))) + .setOperationQueue(operationQueueStatus) + .setPrequeue(prequeueStatus) + .build(); } @SuppressWarnings("ConstantConditions") @@ -1440,4 +1544,9 @@ public GetClientStartTimeResult getClientStartTime(GetClientStartTimeRequest req } return GetClientStartTimeResult.newBuilder().addAllClientStartTime(startTimes).build(); } + + @Override + public void updateDigestsExpiry(Iterable digests) throws IOException { + state.casWorkerMap.setExpire(client, digests); + } } diff --git a/src/main/java/build/buildfarm/instance/shard/RedisShardSubscriber.java b/src/main/java/build/buildfarm/instance/shard/RedisShardSubscriber.java index 6ecfdc05d4..edd2bfdfae 100644 --- a/src/main/java/build/buildfarm/instance/shard/RedisShardSubscriber.java +++ b/src/main/java/build/buildfarm/instance/shard/RedisShardSubscriber.java @@ -20,6 +20,7 @@ import build.buildfarm.instance.server.WatchFuture; import build.buildfarm.v1test.OperationChange; +import build.buildfarm.v1test.ShardWorker; import build.buildfarm.v1test.WorkerChange; import com.google.common.collect.ImmutableList; import com.google.common.collect.ListMultimap; @@ -27,16 +28,17 @@ import com.google.longrunning.Operation; import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.Timestamp; +import com.google.protobuf.util.Timestamps; import java.time.Instant; import java.util.List; -import java.util.Set; +import java.util.Map; import java.util.concurrent.Executor; import java.util.function.Consumer; import java.util.function.Predicate; import java.util.logging.Level; import javax.annotation.Nullable; import lombok.extern.java.Log; -import redis.clients.jedis.Client; +import redis.clients.jedis.Connection; import redis.clients.jedis.JedisPubSub; @Log @@ -59,13 +61,13 @@ void complete() { } private final ListMultimap watchers; - private final Set workers; + private final Map workers; private final String workerChannel; private final Executor executor; RedisShardSubscriber( ListMultimap watchers, - Set workers, + Map workers, String workerChannel, Executor executor) { this.watchers = watchers; @@ -103,41 +105,12 @@ public 
List expiredWatchedOperationChannels(Instant now) { return builder.build(); } - // synchronizing on these because the client has been observed to - // cause protocol desynchronization for multiple concurrent calls - @Override - public synchronized void unsubscribe() { - if (isSubscribed()) { - super.unsubscribe(); - } - } - - @Override - public synchronized void unsubscribe(String... channels) { - super.unsubscribe(channels); - } - - @Override - public synchronized void subscribe(String... channels) { - super.subscribe(channels); - } - - @Override - public synchronized void psubscribe(String... patterns) { - super.psubscribe(patterns); - } - - @Override - public synchronized void punsubscribe(String... patterns) { - super.punsubscribe(patterns); - } - public ListenableFuture watch(String channel, TimedWatcher watcher) { TimedWatchFuture watchFuture = new TimedWatchFuture(watcher) { @Override public void unwatch() { - log.log(Level.FINE, format("unwatching %s", channel)); + log.log(Level.FINER, format("unwatching %s", channel)); RedisShardSubscriber.this.unwatch(channel, this); } }; @@ -173,7 +146,7 @@ public void resetWatchers(String channel, Instant expiresAt) { private void terminateExpiredWatchers(String channel, Instant now, boolean force) { onOperation( channel, - /* operation=*/ null, + /* operation= */ null, (watcher) -> { boolean expired = force || watcher.isExpiredAt(now); if (expired) { @@ -185,7 +158,7 @@ private void terminateExpiredWatchers(String channel, Instant now, boolean force } return expired; }, - /* expiresAt=*/ null); + /* expiresAt= */ null); } public void onOperation(String channel, Operation operation, Instant expiresAt) { @@ -199,7 +172,7 @@ private void onOperation( @Nullable Instant expiresAt) { List operationWatchers = watchers.get(channel); boolean observe = operation == null || operation.hasMetadata() || operation.getDone(); - log.log(Level.FINE, format("onOperation %s: %s", channel, operation)); + log.log(Level.FINER, format("onOperation %s: %s", channel, operation)); synchronized (watchers) { ImmutableList.Builder> observers = ImmutableList.builder(); for (TimedWatchFuture watchFuture : operationWatchers) { @@ -215,7 +188,7 @@ private void onOperation( executor.execute( () -> { if (observe) { - log.log(Level.FINE, "observing " + operation); + log.log(Level.FINER, "observing " + operation); observer.accept(operation); } }); @@ -250,23 +223,28 @@ void onWorkerChange(WorkerChange workerChange) { workerChange.getName(), workerChange.getEffectiveAt())); break; case ADD: - addWorker(workerChange.getName()); + addWorker(workerChange); break; case REMOVE: - removeWorker(workerChange.getName()); + removeWorker(workerChange); break; } } - void addWorker(String worker) { + void addWorker(WorkerChange workerChange) { synchronized (workers) { - workers.add(worker); + workers.put( + workerChange.getName(), + ShardWorker.newBuilder() + .setEndpoint(workerChange.getName()) + .setFirstRegisteredAt(Timestamps.toMillis(workerChange.getAdd().getEffectiveAt())) + .build()); } } - boolean removeWorker(String worker) { + boolean removeWorker(WorkerChange workerChange) { synchronized (workers) { - return workers.remove(worker); + return workers.remove(workerChange.getName()) != null; } } @@ -330,11 +308,10 @@ private String[] placeholderChannel() { return channels; } - @Override - public void proceed(Client client, String... channels) { + public void start(Connection client, String... 
channels) { if (channels.length == 0) { channels = placeholderChannel(); } - super.proceed(client, channels); + proceed(client, channels); } } diff --git a/src/main/java/build/buildfarm/instance/shard/RedisShardSubscription.java b/src/main/java/build/buildfarm/instance/shard/RedisShardSubscription.java index 39e62e141d..847d7920dc 100644 --- a/src/main/java/build/buildfarm/instance/shard/RedisShardSubscription.java +++ b/src/main/java/build/buildfarm/instance/shard/RedisShardSubscription.java @@ -24,14 +24,14 @@ import java.util.function.Supplier; import java.util.logging.Level; import lombok.extern.java.Log; -import redis.clients.jedis.JedisCluster; import redis.clients.jedis.JedisPubSub; +import redis.clients.jedis.UnifiedJedis; @Log class RedisShardSubscription implements Runnable { private final JedisPubSub subscriber; private final InterruptingRunnable onUnsubscribe; - private final Consumer onReset; + private final Consumer onReset; private final Supplier> subscriptions; private final RedisClient client; private final AtomicBoolean stopped = new AtomicBoolean(false); @@ -39,7 +39,7 @@ class RedisShardSubscription implements Runnable { RedisShardSubscription( JedisPubSub subscriber, InterruptingRunnable onUnsubscribe, - Consumer onReset, + Consumer onReset, Supplier> subscriptions, RedisClient client) { this.subscriber = subscriber; @@ -53,7 +53,7 @@ public JedisPubSub getSubscriber() { return subscriber; } - private void subscribe(JedisCluster jedis, boolean isReset) { + private void subscribe(UnifiedJedis jedis, boolean isReset) { if (isReset) { onReset.accept(jedis); } diff --git a/src/main/java/build/buildfarm/instance/shard/RedissonCasWorkerMap.java b/src/main/java/build/buildfarm/instance/shard/RedissonCasWorkerMap.java index 8800a051da..52b010c31f 100644 --- a/src/main/java/build/buildfarm/instance/shard/RedissonCasWorkerMap.java +++ b/src/main/java/build/buildfarm/instance/shard/RedissonCasWorkerMap.java @@ -18,6 +18,7 @@ import build.buildfarm.common.DigestUtil; import build.buildfarm.common.redis.RedisClient; import com.google.common.collect.ImmutableMap; +import java.time.Instant; import java.util.Map; import java.util.Random; import java.util.Set; @@ -169,6 +170,12 @@ public Set get(RedisClient client, Digest blobDigest) { return cacheMap.get(key).readAll(); } + @Override + public long insertTime(RedisClient client, Digest blobDigest) { + String key = cacheMapCasKey(blobDigest); + return Instant.now().getEpochSecond() - keyExpiration_s + cacheMap.get(key).remainTimeToLive(); + } + /** * @brief Get all of the key values as a map from the digests given. * @details If there are no workers for the digest, the key is left out of the returned map. @@ -202,6 +209,14 @@ public int size(RedisClient client) { return cacheMap.size(); } + @Override + public void setExpire(RedisClient client, Iterable blobDigests) { + for (Digest blobDigest : blobDigests) { + String key = cacheMapCasKey(blobDigest); + cacheMap.expireKey(key, keyExpiration_s, TimeUnit.SECONDS); + } + } + /** * @brief Get a random element from the set. * @details Assumes the set is not empty. 
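The insertTime addition above works because every CAS key is written with the same fixed lifetime, keyExpiration_s, so the remaining TTL encodes the insertion moment. Spelled out as a standalone sketch, assuming all quantities are normalized to the same unit, seconds (Redisson's remainTimeToLive reports milliseconds, so the real code depends on that normalization):

class InsertTimeSketch {
  // A key written at time T with fixed lifetime E has ttl = E - (now - T)
  // remaining, so T = now - E + ttl: the identity insertTime relies on.
  static long insertTimeEpochSecs(long nowEpochSecs, long lifetimeSecs, long ttlSecs) {
    return nowEpochSecs - lifetimeSecs + ttlSecs;
  }
}

For example, a blob written with a 7-day lifetime that has 6 days of TTL left must have been inserted one day ago.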
diff --git a/src/main/java/build/buildfarm/instance/shard/RemoteInputStreamFactory.java b/src/main/java/build/buildfarm/instance/shard/RemoteInputStreamFactory.java index 640878707f..e3c8b5a4ca 100644 --- a/src/main/java/build/buildfarm/instance/shard/RemoteInputStreamFactory.java +++ b/src/main/java/build/buildfarm/instance/shard/RemoteInputStreamFactory.java @@ -30,7 +30,7 @@ import build.buildfarm.common.DigestUtil; import build.buildfarm.common.InputStreamFactory; import build.buildfarm.instance.Instance; -import build.buildfarm.instance.shard.ShardInstance.WorkersCallback; +import build.buildfarm.instance.shard.ServerInstance.WorkersCallback; import com.google.common.base.Throwables; import com.google.common.cache.LoadingCache; import com.google.common.collect.Iterables; @@ -72,7 +72,7 @@ public interface UnavailableConsumer { Random rand, LoadingCache workerStubs, UnavailableConsumer onUnavailable) { - this(/* publicName=*/ null, backplane, rand, workerStubs, onUnavailable); + this(/* publicName= */ null, backplane, rand, workerStubs, onUnavailable); } @SuppressWarnings("NullableProblems") diff --git a/src/main/java/build/buildfarm/instance/shard/ShardInstance.java b/src/main/java/build/buildfarm/instance/shard/ServerInstance.java similarity index 88% rename from src/main/java/build/buildfarm/instance/shard/ShardInstance.java rename to src/main/java/build/buildfarm/instance/shard/ServerInstance.java index 482abbaca9..e4e1aaf688 100644 --- a/src/main/java/build/buildfarm/instance/shard/ShardInstance.java +++ b/src/main/java/build/buildfarm/instance/shard/ServerInstance.java @@ -47,6 +47,7 @@ import build.bazel.remote.execution.v2.Action; import build.bazel.remote.execution.v2.ActionResult; import build.bazel.remote.execution.v2.BatchReadBlobsResponse.Response; +import build.bazel.remote.execution.v2.CacheCapabilities; import build.bazel.remote.execution.v2.Command; import build.bazel.remote.execution.v2.Compressor; import build.bazel.remote.execution.v2.Digest; @@ -61,6 +62,7 @@ import build.bazel.remote.execution.v2.Platform.Property; import build.bazel.remote.execution.v2.RequestMetadata; import build.bazel.remote.execution.v2.ResultsCachePolicy; +import build.bazel.remote.execution.v2.SymlinkAbsolutePathStrategy; import build.buildfarm.actioncache.ActionCache; import build.buildfarm.actioncache.ShardActionCache; import build.buildfarm.backplane.Backplane; @@ -78,9 +80,10 @@ import build.buildfarm.common.Write; import build.buildfarm.common.config.BuildfarmConfigs; import build.buildfarm.common.grpc.UniformDelegateServerCallStreamObserver; +import build.buildfarm.common.redis.RedisHashtags; import build.buildfarm.instance.Instance; import build.buildfarm.instance.MatchListener; -import build.buildfarm.instance.server.AbstractServerInstance; +import build.buildfarm.instance.server.NodeInstance; import build.buildfarm.operations.EnrichedOperation; import build.buildfarm.operations.FindOperationsResults; import build.buildfarm.v1test.BackplaneStatus; @@ -135,10 +138,13 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.time.Instant; +import java.util.AbstractMap; import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Collections; import java.util.Deque; +import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -167,7 +173,7 @@ import lombok.extern.java.Log; @Log -public class ShardInstance extends AbstractServerInstance { +public class ServerInstance extends NodeInstance 
{ private static final ListenableFuture IMMEDIATE_VOID_FUTURE = Futures.immediateFuture(null); private static final String TIMEOUT_OUT_OF_BOUNDS = @@ -207,6 +213,16 @@ public class ShardInstance extends AbstractServerInstance { // Other metrics from the backplane private static final Gauge workerPoolSize = Gauge.build().name("worker_pool_size").help("Active worker pool size.").register(); + private static final Gauge storageWorkerPoolSize = + Gauge.build() + .name("storage_worker_pool_size") + .help("Active storage worker pool size.") + .register(); + private static final Gauge executeWorkerPoolSize = + Gauge.build() + .name("execute_worker_pool_size") + .help("Active execute worker pool size.") + .register(); private static final Gauge queueSize = Gauge.build().name("queue_size").labelNames("queue_name").help("Queue size.").register(); @@ -231,7 +247,7 @@ public class ShardInstance extends AbstractServerInstance { private Cache recentCacheServedExecutions; private final Random rand = new Random(); - private final Writes writes; + private final Writes writes = new Writes(this::writeInstanceSupplier); private final int maxCpu; private final int maxRequeueAttempts; @@ -256,23 +272,27 @@ public class ShardInstance extends AbstractServerInstance { private static Backplane createBackplane(String identifier) throws ConfigurationException { if (configs.getBackplane().getType().equals(SHARD)) { return new RedisShardBackplane( - identifier, ShardInstance::stripOperation, ShardInstance::stripQueuedOperation); + identifier, + /* subscribeToBackplane= */ true, + configs.getServer().isRunFailsafeOperation(), + ServerInstance::stripOperation, + ServerInstance::stripQueuedOperation); } else { throw new IllegalArgumentException("Shard Backplane not set in config"); } } - public ShardInstance(String name, String identifier, DigestUtil digestUtil, Runnable onStop) + public ServerInstance(String name, String identifier, DigestUtil digestUtil, Runnable onStop) throws InterruptedException, ConfigurationException { this( name, digestUtil, createBackplane(identifier), onStop, - /* actionCacheFetchService=*/ BuildfarmExecutors.getActionCacheFetchServicePool()); + /* actionCacheFetchService= */ BuildfarmExecutors.getActionCacheFetchServicePool()); } - private ShardInstance( + private ServerInstance( String name, DigestUtil digestUtil, Backplane backplane, @@ -324,7 +344,7 @@ void initializeCaches() { .build(); } - public ShardInstance( + public ServerInstance( String name, DigestUtil digestUtil, Backplane backplane, @@ -344,11 +364,11 @@ public ShardInstance( super( name, digestUtil, - /* contentAddressableStorage=*/ null, - /* actionCache=*/ actionCache, - /* outstandingOperations=*/ null, - /* completedOperations=*/ null, - /* activeBlobWrites=*/ null, + /* contentAddressableStorage= */ null, + /* actionCache= */ actionCache, + /* outstandingOperations= */ null, + /* completedOperations= */ null, + /* activeBlobWrites= */ null, ensureOutputsPresent); this.backplane = backplane; this.actionCache = actionCache; @@ -362,7 +382,6 @@ public ShardInstance( this.actionCacheFetchService = actionCacheFetchService; backplane.setOnUnsubscribe(this::stop); - this.writes = new Writes(writeInstanceCacheLoader()); initializeCaches(); remoteInputStreamFactory = @@ -415,14 +434,14 @@ ListenableFuture iterate() throws IOException, InterruptedException { () -> {}, Deadline.after(5, MINUTES)); try { - log.log(Level.FINE, "queueing " + operationName); + log.log(Level.FINER, "queueing " + operationName); ListenableFuture queueFuture 
= queue(executeEntry, poller, queueTimeout); addCallback( queueFuture, new FutureCallback() { @Override public void onSuccess(Void result) { - log.log(Level.FINE, "successfully queued " + operationName); + log.log(Level.FINER, "successfully queued " + operationName); // nothing } @@ -436,9 +455,10 @@ public void onFailure(Throwable t) { long operationTransformDispatchUSecs = stopwatch.elapsed(MICROSECONDS) - canQueueUSecs; log.log( - Level.FINE, + Level.FINER, format( - "OperationQueuer: Dispatched To Transform %s: %dus in canQueue, %dus in transform dispatch", + "OperationQueuer: Dispatched To Transform %s: %dus in canQueue, %dus in" + + " transform dispatch", operationName, canQueueUSecs, operationTransformDispatchUSecs)); return queueFuture; } catch (Throwable t) { @@ -451,10 +471,9 @@ public void onFailure(Throwable t) { @Override public void run() { - log.log(Level.FINE, "OperationQueuer: Running"); + log.log(Level.FINER, "OperationQueuer: Running"); try { - for (; ; ) { - transformTokensQueue.put(new Object()); + while (transformTokensQueue.offer(new Object(), 5, MINUTES)) { stopwatch.start(); try { iterate() @@ -477,6 +496,7 @@ public void run() { stopwatch.reset(); } } + log.severe("OperationQueuer: Transform lease token timed out"); } catch (InterruptedException e) { // treat with exit operationQueuer = null; @@ -484,7 +504,7 @@ public void run() { } catch (Exception t) { log.log(Level.SEVERE, "OperationQueuer: fatal exception during iteration", t); } finally { - log.log(Level.FINE, "OperationQueuer: Exiting"); + log.log(Level.FINER, "OperationQueuer: Exiting"); } operationQueuer = null; try { @@ -506,6 +526,8 @@ public void run() { TimeUnit.SECONDS.sleep(30); BackplaneStatus backplaneStatus = backplaneStatus(); workerPoolSize.set(backplaneStatus.getActiveWorkersCount()); + executeWorkerPoolSize.set(backplaneStatus.getActiveExecuteWorkersCount()); + storageWorkerPoolSize.set(backplaneStatus.getActiveStorageWorkersCount()); dispatchedOperationsSize.set(backplaneStatus.getDispatchedSize()); preQueueSize.set(backplaneStatus.getPrequeue().getSize()); updateQueueSizes(backplaneStatus.getOperationQueue().getProvisionsList()); @@ -523,7 +545,9 @@ public void run() { private void updateQueueSizes(List queues) { if (queueSize != null) { for (QueueStatus queueStatus : queues) { - queueSize.labels(queueStatus.getName()).set(queueStatus.getSize()); + queueSize + .labels(RedisHashtags.unhashedName(queueStatus.getName())) + .set(queueStatus.getSize()); } } } @@ -569,12 +593,14 @@ public void stop() throws InterruptedException { return; } stopping = true; - log.log(Level.FINE, format("Instance %s is stopping", getName())); + log.log(Level.FINER, format("Instance %s is stopping", getName())); if (operationQueuer != null) { - operationQueuer.stop(); + operationQueuer.interrupt(); + operationQueuer.join(); } if (dispatchedMonitor != null) { dispatchedMonitor.interrupt(); + dispatchedMonitor.join(); } if (prometheusMetricsThread != null) { prometheusMetricsThread.interrupt(); @@ -586,9 +612,7 @@ public void stop() throws InterruptedException { onStop.run(); backplane.stop(); if (!contextDeadlineScheduler.awaitTermination(10, SECONDS)) { - log.log( - Level.SEVERE, - "Could not shut down operation deletion service, some operations may be zombies"); + log.log(Level.SEVERE, "Could not shut down context deadline scheduler"); } if (!operationDeletionService.awaitTermination(10, SECONDS)) { log.log( @@ -605,7 +629,7 @@ public void stop() throws InterruptedException { } 
actionCacheFetchService.shutdownNow(); workerStubs.invalidateAll(); - log.log(Level.FINE, format("Instance %s has been stopped", getName())); + log.log(Level.FINER, format("Instance %s has been stopped", getName())); stopping = false; stopped = true; } @@ -646,51 +670,42 @@ public ListenableFuture> findMissingBlobs( return immediateFailedFuture(Status.fromThrowable(e).asException()); } - // Empty blobs are an exceptional case. Filter them out. - // If the user only requested empty blobs we can immedaitely tell them we already have it. + // Empty blobs are an exceptional case. Filter them out. + // If the user only requested empty blobs we can immediately tell them we already have it. Iterable nonEmptyDigests = Iterables.filter(blobDigests, (digest) -> digest.getSizeBytes() != 0); if (Iterables.isEmpty(nonEmptyDigests)) { return immediateFuture(ImmutableList.of()); } - // This is a faster strategy to check missing blobs which does not require querying the CAS. - // With hundreds of worker machines, it may be too expensive to query all of them for "find - // missing blobs". - // Workers register themselves with the backplane for a 30-second window, and if they fail to - // re-register within this time frame, they are automatically removed from the backplane. While - // this alternative strategy for finding missing blobs is faster and more cost-effective than - // the exhaustive approach of querying each worker to find the digest, it comes with a higher - // risk of returning expired workers despite filtering by active workers below. This is because - // the strategy may return workers that have expired in the last 30 seconds. However, checking - // workers directly is not a guarantee either since workers could leave the cluster after being - // queried. Ultimitely, it will come down to the client's resiliency if the backplane is - // out-of-date and the server lies about which blobs are actually present. We provide this - // alternative strategy for calculating missing blobs. - if (configs.getServer().isFindMissingBlobsViaBackplane()) { - try { - Set uniqueDigests = new HashSet<>(); - nonEmptyDigests.forEach(uniqueDigests::add); - Map> foundBlobs = backplane.getBlobDigestsWorkers(uniqueDigests); - Set workerSet = backplane.getStorageWorkers(); - return immediateFuture( - uniqueDigests.stream() - .filter( // best effort to present digests only missing on active workers - digest -> - Sets.intersection( - foundBlobs.getOrDefault(digest, Collections.emptySet()), workerSet) - .isEmpty()) - .collect(Collectors.toList())); - } catch (Exception e) { - return immediateFailedFuture(Status.fromThrowable(e).asException()); - } + return findMissingBlobsViaBackplane(nonEmptyDigests, requestMetadata); } - // A more accurate way to verify missing blobs is to ask the CAS participants directly if they - // have the blobs. To do this, we get all of the worker nodes that are particpating in the CAS - // as a random list to begin our search. If there are no workers avaiable, tell the client all - // blobs are missing. 
+ return findMissingBlobsQueryingEachWorker(nonEmptyDigests, requestMetadata); + } + + class FindMissingResponseEntry { + final String worker; + final long elapsedMicros; + final Throwable exception; + final int stillMissingAfter; + + FindMissingResponseEntry( + String worker, long elapsedMicros, Throwable exception, int stillMissingAfter) { + this.worker = worker; + this.elapsedMicros = elapsedMicros; + this.exception = exception; + this.stillMissingAfter = stillMissingAfter; + } + } + + // A more accurate way to verify missing blobs is to ask the CAS participants directly if they + // have the blobs. To do this, we get all the worker nodes that are participating in the CAS + // as a random list to begin our search. If there are no workers available, tell the client all + // blobs are missing. + private ListenableFuture> findMissingBlobsQueryingEachWorker( + Iterable nonEmptyDigests, RequestMetadata requestMetadata) { Deque workers; try { List workersList = new ArrayList<>(backplane.getStorageWorkers()); @@ -703,7 +718,7 @@ public ListenableFuture> findMissingBlobs( return immediateFuture(nonEmptyDigests); } - // Search through all of the workers to decide how many CAS blobs are missing. + // Search through all of the workers to decide which CAS blobs are missing. SettableFuture> missingDigestsFuture = SettableFuture.create(); findMissingBlobsOnWorker( UUID.randomUUID().toString(), @@ -717,18 +732,118 @@ public ListenableFuture> findMissingBlobs( return missingDigestsFuture; } - class FindMissingResponseEntry { - final String worker; - final long elapsedMicros; - final Throwable exception; - final int stillMissingAfter; + // This is a faster strategy to check missing blobs which does not require querying the CAS. + // With hundreds of worker machines, it may be too expensive to query all of them for "find + // missing blobs". + // Workers register themselves with the backplane for a 30-second window, and if they fail to + // re-register within this time frame, they are automatically removed from the backplane. While + // this alternative strategy for finding missing blobs is faster and more cost-effective than + // the exhaustive approach of querying each worker to find the digest, it comes with a higher + // risk of returning expired workers despite filtering by active workers below. This is because + // the strategy may return workers that have expired in the last 30 seconds. However, checking + // workers directly is not a guarantee either since workers could leave the cluster after being + // queried. Ultimately, it will come down to the client's resiliency if the backplane is + // out-of-date and the server lies about which blobs are actually present. We provide this + // alternative strategy for calculating missing blobs. 
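Reduced to its core, the backplane strategy described in the comment above treats a digest as missing when the backplane's worker list for it no longer intersects the active storage worker set; findMissingBlobsViaBackplane below layers worker start-time filtering and lease extension on top of that check. A minimal sketch of the core predicate (the generic D stands in for the Digest type; this mirrors the Sets.intersection filter from the original inline code):

import com.google.common.collect.Sets;
import java.util.Collections;
import java.util.Map;
import java.util.Set;

class BackplaneMissingBlobSketch {
  // A digest counts as missing when none of the workers the backplane lists
  // for it are still present in the currently-active storage worker set.
  static <D> boolean missingOnActiveWorkers(
      D digest, Map<D, Set<String>> foundBlobs, Set<String> activeWorkers) {
    return Sets.intersection(
            foundBlobs.getOrDefault(digest, Collections.emptySet()), activeWorkers)
        .isEmpty();
  }
}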
+ private ListenableFuture> findMissingBlobsViaBackplane( + Iterable nonEmptyDigests, RequestMetadata requestMetadata) { + try { + Set uniqueDigests = new HashSet<>(); + nonEmptyDigests.forEach(uniqueDigests::add); + Map> foundBlobs = backplane.getBlobDigestsWorkers(uniqueDigests); + Set workerSet = backplane.getStorageWorkers(); + Map workersStartTime = backplane.getWorkersStartTimeInEpochSecs(workerSet); + Map> digestAndWorkersMap = + uniqueDigests.stream() + .map( + digest -> { + Set initialWorkers = + foundBlobs.getOrDefault(digest, Collections.emptySet()); + return new AbstractMap.SimpleEntry<>( + digest, + filterAndAdjustWorkersForDigest( + digest, initialWorkers, workerSet, workersStartTime)); + }) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + ListenableFuture> missingDigestFuture = + immediateFuture( + digestAndWorkersMap.entrySet().stream() + .filter(entry -> entry.getValue().isEmpty()) + .map(Map.Entry::getKey) + .collect(Collectors.toList())); + return transformAsync( + missingDigestFuture, + (missingDigest) -> { + extendLeaseForDigests(digestAndWorkersMap, requestMetadata); + return immediateFuture(missingDigest); + }, + // Propagate context values but don't cascade its cancellation for downstream calls. + Context.current().fork().fixedContextExecutor(directExecutor())); + } catch (Exception e) { + log.log(Level.SEVERE, "find missing blob via backplane failed", e); + return immediateFailedFuture(Status.fromThrowable(e).asException()); + } + } - FindMissingResponseEntry( - String worker, long elapsedMicros, Throwable exception, int stillMissingAfter) { - this.worker = worker; - this.elapsedMicros = elapsedMicros; - this.exception = exception; - this.stillMissingAfter = stillMissingAfter; + private Set filterAndAdjustWorkersForDigest( + Digest digest, + Set originalWorkerSetWithDigest, + Set activeWorkers, + Map workersStartTime) { + long insertTime; + try { + insertTime = backplane.getDigestInsertTime(digest); + } catch (IOException e) { + log.log(Level.WARNING, format("failed to get digest (%s) insertion time", digest)); + return Collections.emptySet(); + } + Set activeWorkersWithDigest = + Sets.intersection(originalWorkerSetWithDigest, activeWorkers); + Set workersStartedBeforeDigestInsertion = + activeWorkersWithDigest.stream() + .filter( + worker -> + workersStartTime.getOrDefault(worker, Instant.now().getEpochSecond()) + < insertTime) + .collect(Collectors.toSet()); + Set workersToBeRemoved = + Sets.difference(originalWorkerSetWithDigest, workersStartedBeforeDigestInsertion) + .immutableCopy(); + if (!workersToBeRemoved.isEmpty()) { + try { + log.log(Level.FINE, format("adjusting locations for the digest %s", digest)); + backplane.adjustBlobLocations(digest, Collections.emptySet(), workersToBeRemoved); + } catch (IOException e) { + log.log( + Level.WARNING, + format("error adjusting blob location for %s", DigestUtil.toString(digest)), + e); + } + } + return workersStartedBeforeDigestInsertion; + } + + private void extendLeaseForDigests( + Map> digestAndWorkersMap, RequestMetadata requestMetadata) { + Map> workerAndDigestMap = new HashMap<>(); + digestAndWorkersMap.forEach( + (digest, workers) -> + workers.forEach( + worker -> + workerAndDigestMap.computeIfAbsent(worker, w -> new HashSet<>()).add(digest))); + + workerAndDigestMap.forEach( + (worker, digests) -> workerStub(worker).findMissingBlobs(digests, requestMetadata)); + + try { + backplane.updateDigestsExpiry(digestAndWorkersMap.keySet()); + } catch (IOException e) { + log.log( + 
Level.WARNING, + format( + "Failed to update expiry duration for digests (%s)", + digestAndWorkersMap.keySet())); } } @@ -846,7 +961,7 @@ private void fetchBlobFromWorker( public void onNext(ByteString nextChunk) { blobObserver.onNext(nextChunk); received += nextChunk.size(); - ioMetric.observe(received); + ioMetric.observe(nextChunk.size()); } @@ -859,13 +974,22 @@ public void onError(Throwable t) { } else if (status.getCode() == Code.NOT_FOUND) { casMissCounter.inc(); log.log( - Level.FINE, worker + " did not contain " + DigestUtil.toString(blobDigest)); + configs.getServer().isEnsureOutputsPresent() ? Level.WARNING : Level.FINER, + worker + " did not contain " + DigestUtil.toString(blobDigest)); // ignore this, the worker will update the backplane eventually } else if (status.getCode() != Code.DEADLINE_EXCEEDED && SHARD_IS_RETRIABLE.test(status)) { // why not, always workers.addLast(worker); } else { + log.log( + Level.WARNING, + format( + "%s: read(%s) on worker %s after %d bytes of content", + status.getCode().name(), + DigestUtil.toString(blobDigest), + worker, + received)); blobObserver.onError(t); return; } @@ -964,7 +1088,7 @@ public void getBlob( final ListenableFuture> populatedWorkerListFuture; if (emptyWorkerList) { log.log( - Level.FINE, + Level.FINER, format( "worker list was initially empty for %s, attempting to correct", DigestUtil.toString(blobDigest))); @@ -980,7 +1104,7 @@ public void getBlob( RequestMetadata.getDefaultInstance()), (foundOnWorkers) -> { log.log( - Level.FINE, + Level.FINER, format( "worker list was corrected for %s to be %s", DigestUtil.toString(blobDigest), foundOnWorkers.toString())); @@ -1010,7 +1134,7 @@ public void onError(Throwable t) { workersList.clear(); final ListenableFuture> workersListFuture; log.log( - Level.FINE, + Level.FINER, format( "worker list was depleted for %s, attempting to correct", DigestUtil.toString(blobDigest))); @@ -1020,13 +1144,13 @@ public void onError(Throwable t) { backplane, workerSet, locationSet, - ShardInstance.this::workerStub, + ServerInstance.this::workerStub, blobDigest, directExecutor(), RequestMetadata.getDefaultInstance()), (foundOnWorkers) -> { log.log( - Level.FINE, + Level.FINER, format( "worker list was corrected after depletion for %s to be %s", DigestUtil.toString(blobDigest), foundOnWorkers.toString())); @@ -1041,7 +1165,8 @@ public void onError(Throwable t) { @Override public void onQueue(Deque workers) { ctx.run( - () -> + () -> { + try { fetchBlobFromWorker( compressor, blobDigest, @@ -1049,7 +1174,11 @@ public void onQueue(Deque workers) { offset, count, checkedChunkObserver, - requestMetadata)); + requestMetadata); + } catch (Exception e) { + onFailure(e); + } + }); } @Override @@ -1074,7 +1203,8 @@ public void onCompleted() { @Override public void onQueue(Deque workers) { ctx.run( - () -> + () -> { + try { fetchBlobFromWorker( compressor, blobDigest, @@ -1082,7 +1212,11 @@ public void onQueue(Deque workers) { offset, count, chunkObserver, - requestMetadata)); + requestMetadata); + } catch (Exception e) { + onFailure(e); + } + }); } @Override @@ -1113,35 +1247,9 @@ public void onSuccess(List workersList) { protected abstract void onQueue(Deque workers); } - private CacheLoader writeInstanceCacheLoader() { - return new CacheLoader() { - @SuppressWarnings("NullableProblems") - @Override - public Instance load(BlobWriteKey key) { - String instance = null; - // Per the REAPI the identifier should end up as a unique UUID per a - // client level - adding bytes to further 
mitigate collisions and not - // store the entire BlobWriteKey. - String blobKey = key.getIdentifier() + "." + key.getDigest().getSizeBytes(); - try { - instance = backplane.getWriteInstance(blobKey); - if (instance != null) { - return workerStub(instance); - } - } catch (IOException e) { - log.log(Level.WARNING, "error getting write instance for " + instance, e); - } - - instance = getRandomWorker(); - try { - backplane.setWriteInstance(blobKey, instance); - log.log(Level.INFO, "set write-instance: " + blobKey + " -> " + instance); // TODO: [jmarino]: remove - } catch (IOException e) { - log.log(Level.WARNING, "error getting write instance for " + instance, e); - } - return workerStub(instance); - } - }; + private Instance writeInstanceSupplier() { + String worker = getRandomWorker(); + return workerStub(worker); } String getRandomWorker() { @@ -1348,7 +1456,7 @@ ListenableFuture expectDirectory( @Override public CompletableFuture apply(Digest digest, Executor executor) { log.log( - Level.FINE, + Level.FINER, format( "transformQueuedOperation(%s): fetching directory %s", reason, DigestUtil.toString(directoryBlobDigest))); @@ -1483,7 +1591,7 @@ private ListenableFuture transformQueuedOperation( expectCommand(commandDigest, requestMetadata), (command) -> { log.log( - Level.FINE, + Level.FINER, format("transformQueuedOperation(%s): fetched command", operationName)); if (command != null) { queuedOperationBuilder.setCommand(command); @@ -1576,6 +1684,7 @@ public void onFailure(Throwable t) { } }, directExecutor()); + write.reset(); // prevents a queryWriteStatus at index 0 try (OutputStream out = write.getOutput(timeout.getSeconds(), SECONDS, () -> {})) { content.writeTo(out); } catch (IOException e) { @@ -1618,7 +1727,10 @@ protected void validatePlatform( .setSubject(INVALID_PLATFORM) .setDescription( format( - "properties are not valid for queue eligibility: %s. If you think your queue should still accept these poperties without them being specified in queue configuration, consider configuring the queue with `allow_unmatched: True`", + "properties are not valid for queue eligibility: %s. 
If you think your queue" + " should still accept these properties without them being specified in queue" + " configuration, consider configuring the queue with `allow_unmatched:" + " True`", platform.getPropertiesList())); } @@ -1973,7 +2085,7 @@ public ListenableFuture execute( executionSuccess.inc(); log.log( - Level.FINE, + Level.FINER, new StringBuilder() .append("ExecutionSuccess: ") .append(requestMetadata.getToolInvocationId()) @@ -1986,7 +2098,7 @@ public ListenableFuture execute( actionCache.invalidate(DigestUtil.asActionKey(actionDigest)); if (!skipCacheLookup && recentCacheServedExecutions.getIfPresent(requestMetadata) != null) { log.log( - Level.FINE, + Level.FINER, format("Operation %s will have skip_cache_lookup = true due to retry", operationName)); skipCacheLookup = true; } @@ -2021,8 +2133,7 @@ public ListenableFuture execute( if (inDenyList(requestMetadata)) { watcher.observe( - operation - .toBuilder() + operation.toBuilder() .setDone(true) .setResponse(Any.pack(denyActionResponse(actionDigest, BLOCK_LIST_ERROR))) .build()); @@ -2032,7 +2143,7 @@ public ListenableFuture execute( return watchOperation( operation, newActionResultWatcher(DigestUtil.asActionKey(actionDigest), watcher), - /* initial=*/ false); + /* initial= */ false); } catch (IOException e) { return immediateFailedFuture(e); } @@ -2105,8 +2216,7 @@ private void deliverCachedActionResult( .build(); Operation completedOperation = - operation - .toBuilder() + operation.toBuilder() .setDone(true) .setResponse( Any.pack( @@ -2211,9 +2321,9 @@ public ListenableFuture queue(ExecuteEntry executeEntry, Poller poller, Du poller.pause(); long checkCacheUSecs = stopwatch.elapsed(MICROSECONDS); log.log( - Level.FINE, + Level.FINER, format( - "ShardInstance(%s): checkCache(%s): %sus elapsed", + "ServerInstance(%s): checkCache(%s): %sus elapsed", getName(), operation.getName(), checkCacheUSecs)); return IMMEDIATE_VOID_FUTURE; } @@ -2238,9 +2348,9 @@ private ListenableFuture transformAndQueue( Digest actionDigest = metadata.getActionDigest(); SettableFuture queueFuture = SettableFuture.create(); log.log( - Level.FINE, + Level.FINER, format( - "ShardInstance(%s): queue(%s): fetching action %s", + "ServerInstance(%s): queue(%s): fetching action %s", getName(), operation.getName(), actionDigest.getHash())); RequestMetadata requestMetadata = executeEntry.getRequestMetadata(); ListenableFuture actionFuture = @@ -2281,9 +2391,10 @@ private ListenableFuture transformAndQueue( actionFuture, (action) -> { log.log( - Level.FINE, + Level.FINER, format( - "ShardInstance(%s): queue(%s): fetched action %s transforming queuedOperation", + "ServerInstance(%s): queue(%s): fetched action %s transforming" + + " queuedOperation", getName(), operation.getName(), actionDigest.getHash())); Stopwatch transformStopwatch = Stopwatch.createStarted(); return transform( @@ -2311,9 +2422,9 @@ private ListenableFuture transformAndQueue( queuedFuture, (profiledQueuedMetadata) -> { log.log( - Level.FINE, + Level.FINER, format( - "ShardInstance(%s): queue(%s): queuedOperation %s transformed, validating", + "ServerInstance(%s): queue(%s): queuedOperation %s transformed, validating", getName(), operation.getName(), DigestUtil.toString( @@ -2333,9 +2444,9 @@ private ListenableFuture transformAndQueue( validatedFuture, (profiledQueuedMetadata) -> { log.log( - Level.FINE, + Level.FINER, format( - "ShardInstance(%s): queue(%s): queuedOperation %s validated, uploading", + "ServerInstance(%s): queue(%s): queuedOperation %s validated, uploading", getName(), 
operation.getName(), DigestUtil.toString( @@ -2385,9 +2496,10 @@ public void onSuccess(ProfiledQueuedOperationMetadata profiledQueuedMetadata) { long elapsedUSecs = stopwatch.elapsed(MICROSECONDS); long queueUSecs = elapsedUSecs - startQueueUSecs; log.log( - Level.FINE, + Level.FINER, format( - "ShardInstance(%s): queue(%s): %dus checkCache, %dus transform, %dus validate, %dus upload, %dus queue, %dus elapsed", + "ServerInstance(%s): queue(%s): %dus checkCache, %dus transform, %dus" + + " validate, %dus upload, %dus queue, %dus elapsed", getName(), queueOperation.getName(), checkCacheUSecs, @@ -2582,7 +2694,7 @@ public ListenableFuture watchOperation(String operationName, Watcher watch .withDescription(String.format("Operation not found: %s", operationName)) .asException()); } - return watchOperation(operation, watcher, /* initial=*/ true); + return watchOperation(operation, watcher, /* initial= */ true); } private static Operation stripOperation(Operation operation) { @@ -2596,8 +2708,7 @@ private static Operation stripOperation(Operation operation) { private static Operation stripQueuedOperation(Operation operation) { if (operation.getMetadata().is(QueuedOperationMetadata.class)) { operation = - operation - .toBuilder() + operation.toBuilder() .setMetadata(Any.pack(expectExecuteOperationMetadata(operation))) .build(); } @@ -2690,4 +2801,15 @@ private boolean inDenyList(RequestMetadata requestMetadata) throws IOException { } return backplane.isBlacklisted(requestMetadata); } + + @Override + protected CacheCapabilities getCacheCapabilities() { + SymlinkAbsolutePathStrategy.Value symlinkAbsolutePathStrategy = + configs.isAllowSymlinkTargetAbsolute() + ? SymlinkAbsolutePathStrategy.Value.ALLOWED + : SymlinkAbsolutePathStrategy.Value.DISALLOWED; + return super.getCacheCapabilities().toBuilder() + .setSymlinkAbsolutePathStrategy(symlinkAbsolutePathStrategy) + .build(); + } } diff --git a/src/main/java/build/buildfarm/instance/shard/Util.java b/src/main/java/build/buildfarm/instance/shard/Util.java index 5e3493d0c6..7070097659 100644 --- a/src/main/java/build/buildfarm/instance/shard/Util.java +++ b/src/main/java/build/buildfarm/instance/shard/Util.java @@ -141,7 +141,7 @@ public void onFailure(Throwable t) { } }; log.log( - Level.FINE, + Level.FINER, format( "scanning through %d workers to find %s", workerSet.size(), DigestUtil.toString(digest))); @@ -184,7 +184,7 @@ static void checkMissingBlobOnInstance( public void onSuccess(Iterable missingDigests) { boolean found = Iterables.isEmpty(missingDigests); log.log( - Level.FINE, + Level.FINER, format( "check missing response for %s to %s was %sfound", DigestUtil.toString(digest), worker, found ? 
"" : "not ")); @@ -197,7 +197,7 @@ public void onFailure(Throwable t) { Status status = Status.fromThrowable(t); if (status.getCode() == Code.UNAVAILABLE) { log.log( - Level.FINE, + Level.FINER, format( "check missing response for %s to %s was not found for unavailable", DigestUtil.toString(digest), worker)); diff --git a/src/main/java/build/buildfarm/instance/shard/WorkerStubs.java b/src/main/java/build/buildfarm/instance/shard/WorkerStubs.java index 28a29afd7f..abaf5f71e1 100644 --- a/src/main/java/build/buildfarm/instance/shard/WorkerStubs.java +++ b/src/main/java/build/buildfarm/instance/shard/WorkerStubs.java @@ -14,6 +14,7 @@ package build.buildfarm.instance.shard; +import static build.buildfarm.common.grpc.Channels.createChannel; import static com.google.common.util.concurrent.MoreExecutors.listeningDecorator; import static java.util.concurrent.Executors.newSingleThreadScheduledExecutor; @@ -28,9 +29,6 @@ import com.google.common.cache.RemovalListener; import com.google.common.util.concurrent.ListeningScheduledExecutorService; import com.google.protobuf.Duration; -import io.grpc.ManagedChannel; -import io.grpc.netty.NegotiationType; -import io.grpc.netty.NettyChannelBuilder; import java.util.concurrent.TimeUnit; public final class WorkerStubs { @@ -59,17 +57,12 @@ private static Instance newStubInstance(String worker, DigestUtil digestUtil, Du worker, digestUtil, createChannel(worker), + createChannel(worker), // separate write channel timeout, newStubRetrier(), newStubRetryService()); } - private static ManagedChannel createChannel(String target) { - NettyChannelBuilder builder = - NettyChannelBuilder.forTarget(target).negotiationType(NegotiationType.PLAINTEXT); - return builder.build(); - } - private static Retrier newStubRetrier() { return new Retrier( Backoff.exponential( diff --git a/src/main/java/build/buildfarm/instance/shard/Writes.java b/src/main/java/build/buildfarm/instance/shard/Writes.java index fd9547cf4d..d085107c08 100644 --- a/src/main/java/build/buildfarm/instance/shard/Writes.java +++ b/src/main/java/build/buildfarm/instance/shard/Writes.java @@ -39,6 +39,7 @@ import java.util.UUID; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; class Writes { private final LoadingCache blobWriteInstances; @@ -116,15 +117,22 @@ public ListenableFuture getFuture() { } } - Writes(CacheLoader instanceSupplier) { - this(instanceSupplier, /* writeExpiresAfter=*/ 1); + Writes(Supplier instanceSupplier) { + this(instanceSupplier, /* writeExpiresAfter= */ 1); } - Writes(CacheLoader instanceSupplier, long writeExpiresAfter) { + Writes(Supplier instanceSupplier, long writeExpiresAfter) { blobWriteInstances = CacheBuilder.newBuilder() .expireAfterWrite(writeExpiresAfter, TimeUnit.HOURS) - .build(instanceSupplier); + .build( + new CacheLoader() { + @SuppressWarnings("NullableProblems") + @Override + public Instance load(BlobWriteKey key) { + return instanceSupplier.get(); + } + }); } public Write get( diff --git a/src/main/java/build/buildfarm/instance/stub/BUILD b/src/main/java/build/buildfarm/instance/stub/BUILD index c8b2b82e77..e4e6d15b25 100644 --- a/src/main/java/build/buildfarm/instance/stub/BUILD +++ b/src/main/java/build/buildfarm/instance/stub/BUILD @@ -11,11 +11,12 @@ java_library( "//src/main/java/build/buildfarm/instance", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_grpc", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", - 
"@googleapis//:google_bytestream_bytestream_java_grpc", - "@googleapis//:google_bytestream_bytestream_java_proto", - "@googleapis//:google_longrunning_operations_java_grpc", - "@googleapis//:google_longrunning_operations_java_proto", - "@googleapis//:google_rpc_code_java_proto", + "//third_party/remote-apis:build_bazel_remote_execution_v2_remote_execution_java_grpc", + "@com_google_googleapis//google/bytestream:bytestream_java_grpc", + "@com_google_googleapis//google/bytestream:bytestream_java_proto", + "@com_google_googleapis//google/longrunning:longrunning_java_grpc", + "@com_google_googleapis//google/longrunning:longrunning_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_google_code_findbugs_jsr305", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", @@ -23,11 +24,9 @@ java_library( "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_context", "@maven//:io_grpc_grpc_core", - "@maven//:io_grpc_grpc_netty", "@maven//:io_grpc_grpc_protobuf", "@maven//:io_grpc_grpc_stub", "@maven//:org_projectlombok_lombok", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_grpc", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) diff --git a/src/main/java/build/buildfarm/instance/stub/StubInstance.java b/src/main/java/build/buildfarm/instance/stub/StubInstance.java index 268d1314b9..cfcca8384f 100644 --- a/src/main/java/build/buildfarm/instance/stub/StubInstance.java +++ b/src/main/java/build/buildfarm/instance/stub/StubInstance.java @@ -18,6 +18,7 @@ import static build.buildfarm.common.grpc.TracingMetadataUtils.attachMetadataInterceptor; import static com.google.common.base.Preconditions.checkNotNull; import static com.google.common.base.Preconditions.checkState; +import static com.google.common.util.concurrent.Futures.allAsList; import static com.google.common.util.concurrent.Futures.catching; import static com.google.common.util.concurrent.Futures.transform; import static com.google.common.util.concurrent.MoreExecutors.directExecutor; @@ -106,6 +107,7 @@ import com.google.bytestream.ByteStreamGrpc.ByteStreamStub; import com.google.bytestream.ByteStreamProto.ReadRequest; import com.google.bytestream.ByteStreamProto.ReadResponse; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Functions; import com.google.common.base.Supplier; import com.google.common.base.Suppliers; @@ -159,12 +161,15 @@ public class StubInstance implements Instance { private final String identifier; private final DigestUtil digestUtil; private final ManagedChannel channel; + private final ManagedChannel writeChannel; private final @Nullable Duration grpcTimeout; private final Retrier retrier; private final @Nullable ListeningScheduledExecutorService retryService; private boolean isStopped = false; private final long maxBatchUpdateBlobsSize = Size.mbToBytes(3); + @VisibleForTesting long maxRequestSize = Size.mbToBytes(4); + public StubInstance(String name, DigestUtil digestUtil, ManagedChannel channel) { this(name, "no-identifier", digestUtil, channel, Durations.fromDays(DEFAULT_DEADLINE_DAYS)); } @@ -180,7 +185,18 @@ public StubInstance( DigestUtil digestUtil, ManagedChannel channel, Duration grpcTimeout) { - this(name, identifier, digestUtil, channel, grpcTimeout, NO_RETRIES, /* retryService=*/ null); + this(name, identifier, digestUtil, channel, grpcTimeout, NO_RETRIES, /* retryService= */ null); + } 
+ + public StubInstance( + String name, + String identifier, + DigestUtil digestUtil, + ManagedChannel channel, + Duration grpcTimeout, + Retrier retrier, + @Nullable ListeningScheduledExecutorService retryService) { + this(name, identifier, digestUtil, channel, channel, grpcTimeout, retrier, retryService); } @SuppressWarnings("NullableProblems") @@ -189,6 +205,7 @@ public StubInstance( String identifier, DigestUtil digestUtil, ManagedChannel channel, + ManagedChannel writeChannel, Duration grpcTimeout, Retrier retrier, @Nullable ListeningScheduledExecutorService retryService) { @@ -196,6 +213,7 @@ public StubInstance( this.identifier = identifier; this.digestUtil = digestUtil; this.channel = channel; + this.writeChannel = writeChannel; this.grpcTimeout = grpcTimeout; this.retrier = retrier; this.retryService = retryService; @@ -355,8 +373,14 @@ public void start(String publicName) {} @Override public void stop() throws InterruptedException { isStopped = true; - channel.shutdownNow(); - channel.awaitTermination(0, TimeUnit.SECONDS); + if (!channel.isShutdown()) { + channel.shutdownNow(); + channel.awaitTermination(0, TimeUnit.SECONDS); + } + if (!writeChannel.isShutdown()) { + writeChannel.shutdownNow(); + writeChannel.awaitTermination(0, TimeUnit.SECONDS); + } if (retryService != null && !shutdownAndAwaitTermination(retryService, 10, TimeUnit.SECONDS)) { log.log(Level.SEVERE, format("Could not shut down retry service for %s", identifier)); } @@ -413,11 +437,16 @@ public ListenableFuture> findMissingBlobs( .setInstanceName(getName()) .addAllBlobDigests(digests) .build(); - if (request.getSerializedSize() > Size.mbToBytes(4)) { - throw new IllegalStateException( - String.format( - "FINDMISSINGBLOBS IS TOO LARGE: %d digests are required in one request!", - request.getBlobDigestsCount())); + if (request.getSerializedSize() > maxRequestSize) { + // log2n partition for size reduction as needed + int partitionSize = (request.getBlobDigestsCount() + 1) / 2; + return transform( + allAsList( + Iterables.transform( + Iterables.partition(digests, partitionSize), + subDigests -> findMissingBlobs(subDigests, requestMetadata))), + subMissings -> Iterables.concat(subMissings), + directExecutor()); } return transform( deadlined(casFutureStub) @@ -474,7 +503,7 @@ public Write getOperationStreamWrite(String name) { name, Functions.identity(), StubWriteOutputStream.UNLIMITED_EXPECTED_SIZE, - /* autoflush=*/ true, + /* autoflush= */ true, RequestMetadata.getDefaultInstance()); } @@ -508,6 +537,7 @@ static class ReadBlobInterchange implements ClientResponseObserver blobObserver; private ClientCallStreamObserver requestStream; + // Guard against spurious onReady() calls caused by a race between onNext() and // onReady(). If the transport toggles isReady() from false to true while onNext() // is executing, but before onNext() checks isReady(). request(1) would be called @@ -515,9 +545,11 @@ static class ReadBlobInterchange implements ClientResponseObserver - ByteStreamGrpc.newStub(channel) + ByteStreamGrpc.newStub(writeChannel) .withInterceptors(attachMetadataInterceptor(requestMetadata))), resourceName, exceptionTranslator, @@ -697,7 +729,7 @@ public Write getBlobWrite( compressor == Compressor.Value.IDENTITY ? 
digest.getSizeBytes() : StubWriteOutputStream.UNLIMITED_EXPECTED_SIZE, - /* autoflush=*/ false, + /* autoflush= */ false, requestMetadata); } diff --git a/src/main/java/build/buildfarm/metrics/AbstractMetricsPublisher.java b/src/main/java/build/buildfarm/metrics/AbstractMetricsPublisher.java index 5f9422a421..493c378074 100644 --- a/src/main/java/build/buildfarm/metrics/AbstractMetricsPublisher.java +++ b/src/main/java/build/buildfarm/metrics/AbstractMetricsPublisher.java @@ -24,7 +24,6 @@ import com.google.protobuf.util.JsonFormat; import com.google.rpc.PreconditionFailure; import io.prometheus.client.Counter; -import io.prometheus.client.Gauge; import io.prometheus.client.Histogram; import java.util.logging.Level; import lombok.extern.java.Log; @@ -33,24 +32,31 @@ public abstract class AbstractMetricsPublisher implements MetricsPublisher { private static final Counter actionsCounter = Counter.build().name("actions").help("Number of actions.").register(); - private static final Gauge operationsInStage = - Gauge.build() + private static final Counter operationsInStage = + Counter.build() .name("operations_stage_load") .labelNames("stage_name") .help("Operations in stage.") .register(); - private static final Gauge operationStatus = - Gauge.build() + private static final Counter operationStatus = + Counter.build() .name("operation_status") .labelNames("status_code") .help("Operation execution status.") .register(); - private static final Gauge operationsPerWorker = - Gauge.build() + private static final Counter operationsPerWorker = + Counter.build() .name("operation_worker") .labelNames("worker_name") .help("Operations per worker.") .register(); + + private static final Counter operationExitCode = + Counter.build() + .name("operation_exit_code") + .labelNames("exit_code") + .help("Operation execution exit code.") + .register(); private static final Histogram queuedTime = Histogram.build().name("queued_time_ms").help("Queued time in ms.").register(); private static final Histogram outputUploadTime = @@ -63,7 +69,7 @@ public AbstractMetricsPublisher(String clusterId) { } public AbstractMetricsPublisher() { - this(/* clusterId=*/ null); + this(/* clusterId= */ null); } @Override @@ -88,8 +94,7 @@ protected OperationRequestMetadata populateRequestMetadata( .build(); if (operation.getDone() && operation.getResponse().is(ExecuteResponse.class)) { operationRequestMetadata = - operationRequestMetadata - .toBuilder() + operationRequestMetadata.toBuilder() .setExecuteResponse(operation.getResponse().unpack(ExecuteResponse.class)) .build(); operationStatus @@ -97,6 +102,11 @@ protected OperationRequestMetadata populateRequestMetadata( Integer.toString( operationRequestMetadata.getExecuteResponse().getStatus().getCode())) .inc(); + operationExitCode + .labels( + Integer.toString( + operationRequestMetadata.getExecuteResponse().getResult().getExitCode())) + .inc(); if (operationRequestMetadata.getExecuteResponse().hasResult() && operationRequestMetadata.getExecuteResponse().getResult().hasExecutionMetadata()) { operationsPerWorker @@ -139,8 +149,7 @@ protected OperationRequestMetadata populateRequestMetadata( } if (operation.getMetadata().is(ExecuteOperationMetadata.class)) { operationRequestMetadata = - operationRequestMetadata - .toBuilder() + operationRequestMetadata.toBuilder() .setExecuteOperationMetadata( operation.getMetadata().unpack(ExecuteOperationMetadata.class)) .build(); @@ -172,7 +181,7 @@ protected static String formatRequestMetadataToJson( .usingTypeRegistry(typeRegistry) 
.omittingInsignificantWhitespace() .print(operationRequestMetadata); - log.log(Level.FINE, "{}", formattedRequestMetadata); + log.log(Level.FINER, "{}", formattedRequestMetadata); return formattedRequestMetadata; } } diff --git a/src/main/java/build/buildfarm/metrics/BUILD b/src/main/java/build/buildfarm/metrics/BUILD index 69166f1ca8..6a280f05b2 100644 --- a/src/main/java/build/buildfarm/metrics/BUILD +++ b/src/main/java/build/buildfarm/metrics/BUILD @@ -5,14 +5,12 @@ java_library( visibility = ["//visibility:public"], deps = [ "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", - "@googleapis//:google_rpc_code_java_proto", - "@googleapis//:google_rpc_error_details_java_proto", - "@googleapis//:google_rpc_status_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", "@maven//:com_google_protobuf_protobuf_java_util", "@maven//:io_prometheus_simpleclient", "@maven//:org_projectlombok_lombok", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) diff --git a/src/main/java/build/buildfarm/metrics/aws/AwsMetricsPublisher.java b/src/main/java/build/buildfarm/metrics/aws/AwsMetricsPublisher.java deleted file mode 100644 index e0ae2785e0..0000000000 --- a/src/main/java/build/buildfarm/metrics/aws/AwsMetricsPublisher.java +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2020 The Bazel Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package build.buildfarm.metrics.aws; - -import build.bazel.remote.execution.v2.RequestMetadata; -import build.buildfarm.common.config.BuildfarmConfigs; -import build.buildfarm.metrics.AbstractMetricsPublisher; -import com.amazonaws.ClientConfiguration; -import com.amazonaws.auth.AWSCredentials; -import com.amazonaws.auth.AWSStaticCredentialsProvider; -import com.amazonaws.handlers.AsyncHandler; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; -import com.amazonaws.services.sns.AmazonSNSAsync; -import com.amazonaws.services.sns.AmazonSNSAsyncClientBuilder; -import com.amazonaws.services.sns.model.PublishRequest; -import com.amazonaws.services.sns.model.PublishResult; -import com.amazonaws.util.StringUtils; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.longrunning.Operation; -import java.io.IOException; -import java.util.Base64; -import java.util.HashMap; -import java.util.logging.Level; -import lombok.extern.java.Log; - -@Log -public class AwsMetricsPublisher extends AbstractMetricsPublisher { - private static BuildfarmConfigs configs = BuildfarmConfigs.getInstance(); - private static AmazonSNSAsync snsClient; - - private final String snsTopicOperations; - private String accessKeyId = null; - private String secretKey = null; - private final String region; - private final int snsClientMaxConnections; - - public AwsMetricsPublisher() { - super(configs.getServer().getClusterId()); - snsTopicOperations = configs.getServer().getMetrics().getTopic(); - region = configs.getServer().getCloudRegion(); - getAwsSecret(configs.getServer().getMetrics().getSecretName()); - snsClientMaxConnections = configs.getServer().getMetrics().getTopicMaxConnections(); - if (!StringUtils.isNullOrEmpty(snsTopicOperations) - && snsClientMaxConnections > 0 - && !StringUtils.isNullOrEmpty(accessKeyId) - && !StringUtils.isNullOrEmpty(secretKey) - && !StringUtils.isNullOrEmpty(region)) { - snsClient = initSnsClient(); - } - } - - @Override - public void publishRequestMetadata(Operation operation, RequestMetadata requestMetadata) { - try { - if (snsClient != null) { - snsClient.publishAsync( - new PublishRequest( - snsTopicOperations, - formatRequestMetadataToJson(populateRequestMetadata(operation, requestMetadata))), - new AsyncHandler() { - @Override - public void onError(Exception e) { - log.log(Level.WARNING, "Could not publish metrics data to SNS.", e); - } - - @Override - public void onSuccess(PublishRequest request, PublishResult publishResult) {} - }); - } - } catch (Exception e) { - log.log( - Level.WARNING, - String.format("Could not publish request metadata to SNS for %s.", operation.getName()), - e); - } - } - - private AmazonSNSAsync initSnsClient() { - log.log(Level.INFO, "Initializing SNS Client."); - return AmazonSNSAsyncClientBuilder.standard() - .withRegion(region) - .withClientConfiguration( - new ClientConfiguration().withMaxConnections(snsClientMaxConnections)) - .withCredentials( - new AWSStaticCredentialsProvider( - new AWSCredentials() { - @Override - public String getAWSAccessKeyId() { - return accessKeyId; - } - - @Override - public String getAWSSecretKey() { - return secretKey; - } - })) - .build(); - } - - @Override - public void publishMetric(String metricName, Object metricValue) { - throw new UnsupportedOperationException(); - 
} - - @SuppressWarnings("unchecked") - private void getAwsSecret(String secretName) { - AWSSecretsManager client = AWSSecretsManagerClientBuilder.standard().withRegion(region).build(); - GetSecretValueRequest getSecretValueRequest = - new GetSecretValueRequest().withSecretId(secretName); - GetSecretValueResult getSecretValueResult; - try { - getSecretValueResult = client.getSecretValue(getSecretValueRequest); - } catch (Exception e) { - log.log(Level.SEVERE, String.format("Could not get secret %s from AWS.", secretName)); - return; - } - String secret; - if (getSecretValueResult.getSecretString() != null) { - secret = getSecretValueResult.getSecretString(); - } else { - secret = - new String(Base64.getDecoder().decode(getSecretValueResult.getSecretBinary()).array()); - } - - if (secret != null) { - try { - final ObjectMapper objectMapper = new ObjectMapper(); - final HashMap secretMap = objectMapper.readValue(secret, HashMap.class); - accessKeyId = secretMap.get("access_key"); - secretKey = secretMap.get("secret_key"); - } catch (IOException e) { - log.log(Level.SEVERE, String.format("Could not parse secret %s from AWS", secretName)); - } - } - } -} diff --git a/src/main/java/build/buildfarm/metrics/aws/BUILD b/src/main/java/build/buildfarm/metrics/aws/BUILD deleted file mode 100644 index 51d44ea8c9..0000000000 --- a/src/main/java/build/buildfarm/metrics/aws/BUILD +++ /dev/null @@ -1,22 +0,0 @@ -java_library( - name = "aws", - srcs = glob(["*.java"]), - plugins = ["//src/main/java/build/buildfarm/common:lombok"], - visibility = ["//visibility:public"], - deps = [ - "//src/main/java/build/buildfarm/common/config", - "//src/main/java/build/buildfarm/metrics", - "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", - "@googleapis//:google_rpc_code_java_proto", - "@googleapis//:google_rpc_error_details_java_proto", - "@googleapis//:google_rpc_status_java_proto", - "@maven//:com_amazonaws_aws_java_sdk_core", - "@maven//:com_amazonaws_aws_java_sdk_secretsmanager", - "@maven//:com_amazonaws_aws_java_sdk_sns", - "@maven//:com_fasterxml_jackson_core_jackson_databind", - "@maven//:com_google_guava_guava", - "@maven//:com_google_protobuf_protobuf_java_util", - "@maven//:org_projectlombok_lombok", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", - ], -) diff --git a/src/main/java/build/buildfarm/metrics/gcp/BUILD b/src/main/java/build/buildfarm/metrics/gcp/BUILD deleted file mode 100644 index 765902b905..0000000000 --- a/src/main/java/build/buildfarm/metrics/gcp/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -java_library( - name = "gcp", - srcs = glob(["*.java"]), - visibility = ["//visibility:public"], - deps = [ - "//src/main/java/build/buildfarm/common/config", - "//src/main/java/build/buildfarm/metrics", - "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", - "@googleapis//:google_rpc_code_java_proto", - "@googleapis//:google_rpc_error_details_java_proto", - "@googleapis//:google_rpc_status_java_proto", - "@maven//:com_google_guava_guava", - "@maven//:com_google_protobuf_protobuf_java_util", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", - ], -) diff --git a/src/main/java/build/buildfarm/metrics/log/BUILD b/src/main/java/build/buildfarm/metrics/log/BUILD index 88ea37849e..aa325a3cc7 100644 --- a/src/main/java/build/buildfarm/metrics/log/BUILD +++ b/src/main/java/build/buildfarm/metrics/log/BUILD @@ -7,12 +7,10 @@ java_library( "//src/main/java/build/buildfarm/common/config", 
"//src/main/java/build/buildfarm/metrics", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", - "@googleapis//:google_rpc_code_java_proto", - "@googleapis//:google_rpc_error_details_java_proto", - "@googleapis//:google_rpc_status_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java_util", "@maven//:org_projectlombok_lombok", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) diff --git a/src/main/java/build/buildfarm/operations/BUILD b/src/main/java/build/buildfarm/operations/BUILD index ee8df831d9..3e32dc934b 100644 --- a/src/main/java/build/buildfarm/operations/BUILD +++ b/src/main/java/build/buildfarm/operations/BUILD @@ -11,10 +11,8 @@ java_library( deps = [ "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_proto", - "@googleapis//:google_bytestream_bytestream_java_proto", - "@googleapis//:google_rpc_code_java_proto", - "@googleapis//:google_rpc_error_details_java_proto", - "@googleapis//:google_rpc_error_details_proto", + "@com_google_googleapis//google/bytestream:bytestream_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_google_code_findbugs_jsr305", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", @@ -26,6 +24,6 @@ java_library( "@maven//:io_grpc_grpc_core", "@maven//:io_grpc_grpc_protobuf", "@maven//:io_grpc_grpc_stub", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) diff --git a/src/main/java/build/buildfarm/operations/finder/BUILD b/src/main/java/build/buildfarm/operations/finder/BUILD index 5c8342609b..a49f083fae 100644 --- a/src/main/java/build/buildfarm/operations/finder/BUILD +++ b/src/main/java/build/buildfarm/operations/finder/BUILD @@ -10,9 +10,7 @@ java_library( "//src/main/java/build/buildfarm/instance", "//src/main/java/build/buildfarm/operations", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", - "//third_party/jedis", - "@googleapis//:google_rpc_code_java_proto", - "@googleapis//:google_rpc_error_details_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_google_code_findbugs_jsr305", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", @@ -22,11 +20,11 @@ java_library( "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_context", "@maven//:io_grpc_grpc_core", - "@maven//:io_grpc_grpc_netty", "@maven//:io_grpc_grpc_protobuf", "@maven//:io_grpc_grpc_stub", "@maven//:org_apache_commons_commons_pool2", "@maven//:org_projectlombok_lombok", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@maven//:redis_clients_jedis", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) diff --git a/src/main/java/build/buildfarm/operations/finder/EnrichedOperationBuilder.java b/src/main/java/build/buildfarm/operations/finder/EnrichedOperationBuilder.java index f6cdfcbd2a..2dac505d1d 100644 --- a/src/main/java/build/buildfarm/operations/finder/EnrichedOperationBuilder.java +++ b/src/main/java/build/buildfarm/operations/finder/EnrichedOperationBuilder.java @@ -33,7 +33,7 @@ import com.google.rpc.PreconditionFailure; import java.util.logging.Level; import lombok.extern.java.Log; -import 
redis.clients.jedis.JedisCluster; +import redis.clients.jedis.UnifiedJedis; /** * @class EnrichedOperationBuilder @@ -55,9 +55,9 @@ public class EnrichedOperationBuilder { * @note Suggested return identifier: operation. */ public static EnrichedOperation build( - JedisCluster cluster, Instance instance, String operationKey) { + UnifiedJedis jedis, Instance instance, String operationKey) { EnrichedOperation operationWithMetadata = new EnrichedOperation(); - operationWithMetadata.operation = operationKeyToOperation(cluster, operationKey); + operationWithMetadata.operation = operationKeyToOperation(jedis, operationKey); // the operation could not be fetched so there is nothing further to derive if (operationWithMetadata.operation == null) { return operationWithMetadata; @@ -83,8 +83,8 @@ public static EnrichedOperation build( * @return The looked up operation. * @note Suggested return identifier: operation. */ - public static Operation operationKeyToOperation(JedisCluster cluster, String operationKey) { - String json = cluster.get(operationKey); + public static Operation operationKeyToOperation(UnifiedJedis jedis, String operationKey) { + String json = jedis.get(operationKey); return jsonToOperation(json); } diff --git a/src/main/java/build/buildfarm/operations/finder/OperationsFinder.java b/src/main/java/build/buildfarm/operations/finder/OperationsFinder.java index 2bdb0c5648..912d16e811 100644 --- a/src/main/java/build/buildfarm/operations/finder/OperationsFinder.java +++ b/src/main/java/build/buildfarm/operations/finder/OperationsFinder.java @@ -34,10 +34,10 @@ import org.json.simple.JSONObject; import org.json.simple.parser.JSONParser; import org.json.simple.parser.ParseException; -import redis.clients.jedis.Jedis; import redis.clients.jedis.JedisCluster; -import redis.clients.jedis.ScanParams; -import redis.clients.jedis.ScanResult; +import redis.clients.jedis.UnifiedJedis; +import redis.clients.jedis.params.ScanParams; +import redis.clients.jedis.resps.ScanResult; /** * @class OperationsFinder @@ -57,24 +57,29 @@ public class OperationsFinder { * @note Suggested return identifier: results. */ public static FindOperationsResults findEnrichedOperations( - JedisCluster cluster, Instance instance, FindOperationsSettings settings) { + UnifiedJedis jedis, Instance instance, FindOperationsSettings settings) { FindOperationsResults results = new FindOperationsResults(); results.operations = new HashMap<>(); adjustFilter(settings); - // JedisCluster only supports SCAN commands with MATCH patterns containing hash-tags. - // This prevents us from using the cluster's SCAN to traverse all of the CAS. - // That's why we choose to scan each of the jedisNode's individually. - cluster - .getClusterNodes() - .values() - .forEach( - pool -> { - try (Jedis node = pool.getResource()) { - findEnrichedOperationOnNode(cluster, node, instance, settings, results); - } - }); + if (jedis instanceof JedisCluster) { + JedisCluster cluster = (JedisCluster) jedis; + // JedisCluster only supports SCAN commands with MATCH patterns containing hash-tags. + // This prevents us from using the cluster's SCAN to traverse all of the CAS. + // That's why we choose to scan each of the jedisNode's individually. 
+ cluster + .getClusterNodes() + .values() + .forEach( + pool -> { + try (UnifiedJedis node = new UnifiedJedis(pool.getResource())) { + findEnrichedOperationOnNode(cluster, node, instance, settings, results); + } + }); + } else { + findEnrichedOperationOnNode(jedis, jedis, instance, settings, results); + } return results; } @@ -89,23 +94,28 @@ public static FindOperationsResults findEnrichedOperations( * @note Suggested return identifier: results. */ public static List findOperations( - JedisCluster cluster, FindOperationsSettings settings) { + UnifiedJedis jedis, FindOperationsSettings settings) { List results = new ArrayList<>(); adjustFilter(settings); - // JedisCluster only supports SCAN commands with MATCH patterns containing hash-tags. - // This prevents us from using the cluster's SCAN to traverse all of the CAS. - // That's why we choose to scan each of the jedisNode's individually. - cluster - .getClusterNodes() - .values() - .forEach( - pool -> { - try (Jedis node = pool.getResource()) { - findOperationOnNode(cluster, node, settings, results); - } - }); + if (jedis instanceof JedisCluster) { + JedisCluster cluster = (JedisCluster) jedis; + // JedisCluster only supports SCAN commands with MATCH patterns containing hash-tags. + // This prevents us from using the cluster's SCAN to traverse all of the CAS. + // That's why we choose to scan each of the jedisNode's individually. + cluster + .getClusterNodes() + .values() + .forEach( + pool -> { + try (UnifiedJedis node = new UnifiedJedis(pool.getResource())) { + findOperationOnNode(cluster, node, settings, results); + } + }); + } else { + findOperationOnNode(jedis, jedis, settings, results); + } return results; } @@ -135,8 +145,8 @@ private static void adjustFilter(FindOperationsSettings settings) { */ @SuppressWarnings({"unchecked", "rawtypes"}) private static void findEnrichedOperationOnNode( - JedisCluster cluster, - Jedis node, + UnifiedJedis cluster, + UnifiedJedis node, Instance instance, FindOperationsSettings settings, FindOperationsResults results) { @@ -169,7 +179,10 @@ private static void findEnrichedOperationOnNode( * @param results Accumulating results from performing a search. */ private static void findOperationOnNode( - JedisCluster cluster, Jedis node, FindOperationsSettings settings, List results) { + UnifiedJedis cluster, + UnifiedJedis node, + FindOperationsSettings settings, + List results) { // iterate over all operation entries via scanning // construct query @@ -198,7 +211,7 @@ private static void findOperationOnNode( * @param results Accumulating results from finding operations. */ private static void collectOperations( - JedisCluster cluster, + UnifiedJedis cluster, Instance instance, List operationKeys, String filterPredicate, @@ -220,7 +233,7 @@ private static void collectOperations( * @param results Accumulating results from finding operations. 
*/ private static void collectOperations( - JedisCluster cluster, + UnifiedJedis cluster, List operationKeys, String filterPredicate, List results) { diff --git a/src/main/java/build/buildfarm/proxy/http/AuthAndTLSOptions.java b/src/main/java/build/buildfarm/proxy/http/AuthAndTLSOptions.java index 5121eb0f64..d4aaa8d947 100644 --- a/src/main/java/build/buildfarm/proxy/http/AuthAndTLSOptions.java +++ b/src/main/java/build/buildfarm/proxy/http/AuthAndTLSOptions.java @@ -25,8 +25,8 @@ public class AuthAndTLSOptions extends OptionsBase { name = "google_default_credentials", defaultValue = "false", help = - "Whether to use 'Google Application Default Credentials' for authentication." - + " See https://cloud.google.com/docs/authentication for details. Disabled by default.") + "Whether to use 'Google Application Default Credentials' for authentication. See" + + " https://cloud.google.com/docs/authentication for details. Disabled by default.") public boolean useGoogleDefaultCredentials; @Option( diff --git a/src/main/java/build/buildfarm/proxy/http/BUILD b/src/main/java/build/buildfarm/proxy/http/BUILD index cfb4fa2674..9e82c18735 100644 --- a/src/main/java/build/buildfarm/proxy/http/BUILD +++ b/src/main/java/build/buildfarm/proxy/http/BUILD @@ -7,10 +7,10 @@ java_library( "//src/main/java/build/buildfarm/common", "//src/main/java/build/buildfarm/common/resources:resource_java_proto", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", - "@googleapis//:google_bytestream_bytestream_java_grpc", - "@googleapis//:google_bytestream_bytestream_java_proto", - "@googleapis//:google_rpc_code_java_proto", - "@googleapis//:google_rpc_status_java_proto", + "//third_party/remote-apis:build_bazel_remote_execution_v2_remote_execution_java_grpc", + "@com_google_googleapis//google/bytestream:bytestream_java_grpc", + "@com_google_googleapis//google/bytestream:bytestream_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_github_pcj_google_options", "@maven//:com_google_auth_google_auth_library_credentials", "@maven//:com_google_auth_google_auth_library_oauth2_http", @@ -23,6 +23,7 @@ java_library( "@maven//:io_grpc_grpc_netty", "@maven//:io_grpc_grpc_protobuf", "@maven//:io_grpc_grpc_stub", + "@maven//:io_grpc_grpc_util", "@maven//:io_netty_netty_buffer", "@maven//:io_netty_netty_codec", "@maven//:io_netty_netty_codec_http", @@ -35,7 +36,6 @@ java_library( "@maven//:io_netty_netty_transport_native_kqueue", "@maven//:io_netty_netty_transport_native_unix_common", "@maven//:org_projectlombok_lombok", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_grpc", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) diff --git a/src/main/java/build/buildfarm/proxy/http/BlobWriteObserver.java b/src/main/java/build/buildfarm/proxy/http/BlobWriteObserver.java index a88fc77756..34caf3679e 100644 --- a/src/main/java/build/buildfarm/proxy/http/BlobWriteObserver.java +++ b/src/main/java/build/buildfarm/proxy/http/BlobWriteObserver.java @@ -103,7 +103,8 @@ private void validateRequest(WriteRequest request) { log.log( Level.WARNING, String.format( - "ByteStreamServer:write:%s: finish_write request of size %d for write size %d != expected %d", + "ByteStreamServer:write:%s: finish_write request of size %d for write size %d !=" + + " expected %d", resourceName, request.getData().size(), sizeAfterWrite, size)); throw new IllegalArgumentException("Write size 
invalid: " + sizeAfterWrite); } diff --git a/src/main/java/build/buildfarm/proxy/http/HttpBlobStore.java b/src/main/java/build/buildfarm/proxy/http/HttpBlobStore.java index 2b8be55439..0b26984278 100644 --- a/src/main/java/build/buildfarm/proxy/http/HttpBlobStore.java +++ b/src/main/java/build/buildfarm/proxy/http/HttpBlobStore.java @@ -406,7 +406,7 @@ private boolean isChannelPipelineEmpty(ChannelPipeline pipeline) { @Override public boolean containsKey(String key) throws IOException, InterruptedException { try { - return get(key, /* out=*/ null, true, false).get(); + return get(key, /* out= */ null, true, false).get(); } catch (ExecutionException e) { Throwable cause = e.getCause(); Throwables.throwIfInstanceOf(e, IOException.class); diff --git a/src/main/java/build/buildfarm/proxy/http/HttpProxy.java b/src/main/java/build/buildfarm/proxy/http/HttpProxy.java index 0a8ccddd59..9f5e9a1762 100644 --- a/src/main/java/build/buildfarm/proxy/http/HttpProxy.java +++ b/src/main/java/build/buildfarm/proxy/http/HttpProxy.java @@ -53,7 +53,7 @@ public HttpProxy( SimpleBlobStore simpleBlobStore = HttpBlobStore.create( URI.create(options.httpCache), - /* remoteMaxConnections=*/ 0, + /* remoteMaxConnections= */ 0, (int) SECONDS.toMillis(options.timeout), creds); server = diff --git a/src/main/java/build/buildfarm/proxy/http/Utils.java b/src/main/java/build/buildfarm/proxy/http/Utils.java index e65b2cd121..edae84d240 100644 --- a/src/main/java/build/buildfarm/proxy/http/Utils.java +++ b/src/main/java/build/buildfarm/proxy/http/Utils.java @@ -21,6 +21,7 @@ /** Utility methods for the remote package. * */ public final class Utils { private Utils() {} + /** * Returns the result of a {@link ListenableFuture} if successful, or throws any checked {@link * Exception} directly if it's an {@link IOException} or else wraps it in an {@link IOException}. 
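The Utils hunk above touches only the javadoc, but the documented contract is worth spelling out: block on the future, rethrow an ExecutionException's cause directly when it is a checked IOException, and wrap any other cause in an IOException. A sketch of that behavior (the class name FutureUtils and method name getFromFuture are assumptions; the diff does not show the actual implementation):

    import com.google.common.util.concurrent.ListenableFuture;
    import java.io.IOException;
    import java.util.concurrent.ExecutionException;

    final class FutureUtils {
      private FutureUtils() {}

      // Blocks on the future; IOException causes surface unchanged, and all
      // other failures are wrapped so callers only handle IOException and
      // InterruptedException.
      static <V> V getFromFuture(ListenableFuture<V> future)
          throws IOException, InterruptedException {
        try {
          return future.get();
        } catch (ExecutionException e) {
          Throwable cause = e.getCause();
          if (cause instanceof IOException) {
            throw (IOException) cause;
          }
          throw new IOException(cause);
        }
      }
    }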
diff --git a/src/main/java/build/buildfarm/rpms/server/BUILD b/src/main/java/build/buildfarm/rpms/server/BUILD index 31942644e8..d407be4589 100644 --- a/src/main/java/build/buildfarm/rpms/server/BUILD +++ b/src/main/java/build/buildfarm/rpms/server/BUILD @@ -1,4 +1,4 @@ -load("@rules_pkg//:rpm.bzl", "pkg_rpm") +load("@rules_pkg//pkg:rpm.bzl", "pkg_rpm") pkg_rpm( name = "buildfarm-server-rpm", diff --git a/src/main/java/build/buildfarm/rpms/worker/BUILD b/src/main/java/build/buildfarm/rpms/worker/BUILD index 1a06f01c90..98e76fb81e 100644 --- a/src/main/java/build/buildfarm/rpms/worker/BUILD +++ b/src/main/java/build/buildfarm/rpms/worker/BUILD @@ -1,4 +1,4 @@ -load("@rules_pkg//:rpm.bzl", "pkg_rpm") +load("@rules_pkg//pkg:rpm.bzl", "pkg_rpm") pkg_rpm( name = "buildfarm-worker-rpm", diff --git a/src/main/java/build/buildfarm/server/BUILD b/src/main/java/build/buildfarm/server/BUILD index b05d2e5ef7..ba31d7a875 100644 --- a/src/main/java/build/buildfarm/server/BUILD +++ b/src/main/java/build/buildfarm/server/BUILD @@ -14,25 +14,19 @@ java_library( "//src/main/java/build/buildfarm/instance", "//src/main/java/build/buildfarm/instance/shard", "//src/main/java/build/buildfarm/metrics/prometheus", - "//src/main/java/build/buildfarm/server/controllers:WebController", "//src/main/java/build/buildfarm/server/services", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", + "@io_grpc_grpc_proto//:health_java_proto", "@maven//:com_github_pcj_google_options", + "@maven//:com_google_api_grpc_proto_google_common_protos", "@maven//:com_google_guava_guava", "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_core", "@maven//:io_grpc_grpc_services", + "@maven//:io_grpc_grpc_util", "@maven//:io_prometheus_simpleclient", "@maven//:javax_annotation_javax_annotation_api", "@maven//:org_bouncycastle_bcprov_jdk15on", "@maven//:org_projectlombok_lombok", - "@maven//:org_springframework_boot_spring_boot", - "@maven//:org_springframework_boot_spring_boot_autoconfigure", - "@maven//:org_springframework_boot_spring_boot_starter_thymeleaf", - "@maven//:org_springframework_boot_spring_boot_starter_web", - "@maven//:org_springframework_spring_beans", - "@maven//:org_springframework_spring_context", - "@maven//:org_springframework_spring_core", - "@maven//:org_springframework_spring_web", ], ) diff --git a/src/main/java/build/buildfarm/server/BuildFarmServer.java b/src/main/java/build/buildfarm/server/BuildFarmServer.java index 76b36d5366..be5f934539 100644 --- a/src/main/java/build/buildfarm/server/BuildFarmServer.java +++ b/src/main/java/build/buildfarm/server/BuildFarmServer.java @@ -15,58 +15,48 @@ package build.buildfarm.server; import static build.buildfarm.common.io.Utils.formatIOError; -import static com.google.common.base.Preconditions.checkState; import static com.google.common.util.concurrent.MoreExecutors.shutdownAndAwaitTermination; import static java.util.concurrent.Executors.newSingleThreadScheduledExecutor; +import static java.util.concurrent.TimeUnit.SECONDS; import static java.util.logging.Level.SEVERE; +import static java.util.logging.Level.WARNING; import build.buildfarm.common.DigestUtil; +import build.buildfarm.common.LoggingMain; import build.buildfarm.common.config.BuildfarmConfigs; import build.buildfarm.common.config.GrpcMetrics; import build.buildfarm.common.grpc.TracingMetadataUtils.ServerHeadersInterceptor; import build.buildfarm.common.services.ByteStreamService; import build.buildfarm.common.services.ContentAddressableStorageService; import build.buildfarm.instance.Instance; 
-import build.buildfarm.instance.shard.ShardInstance; +import build.buildfarm.instance.shard.ServerInstance; import build.buildfarm.metrics.prometheus.PrometheusPublisher; -import build.buildfarm.server.controllers.WebController; import build.buildfarm.server.services.ActionCacheService; -import build.buildfarm.server.services.AdminService; import build.buildfarm.server.services.CapabilitiesService; import build.buildfarm.server.services.ExecutionService; import build.buildfarm.server.services.FetchService; import build.buildfarm.server.services.OperationQueueService; import build.buildfarm.server.services.OperationsService; import build.buildfarm.server.services.PublishBuildEventService; -import com.google.devtools.common.options.OptionsParsingException; import io.grpc.ServerBuilder; import io.grpc.ServerInterceptor; import io.grpc.health.v1.HealthCheckResponse.ServingStatus; +import io.grpc.protobuf.services.HealthStatusManager; import io.grpc.protobuf.services.ProtoReflectionService; -import io.grpc.services.HealthStatusManager; import io.grpc.util.TransmitStatusRuntimeExceptionInterceptor; import io.prometheus.client.Counter; import java.io.File; import java.io.IOException; import java.security.Security; -import java.util.HashMap; -import java.util.Map; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; -import javax.annotation.PostConstruct; -import javax.annotation.PreDestroy; +import java.util.concurrent.atomic.AtomicBoolean; import javax.naming.ConfigurationException; import lombok.extern.java.Log; import org.bouncycastle.jce.provider.BouncyCastleProvider; -import org.springframework.boot.SpringApplication; -import org.springframework.boot.autoconfigure.SpringBootApplication; -import org.springframework.context.annotation.ComponentScan; -@SuppressWarnings("deprecation") @Log -@SpringBootApplication -@ComponentScan("build.buildfarm") -public class BuildFarmServer { +public class BuildFarmServer extends LoggingMain { private static final java.util.logging.Logger nettyLogger = java.util.logging.Logger.getLogger("io.grpc.netty"); private static final Counter healthCheckMetric = @@ -80,20 +70,56 @@ public class BuildFarmServer { private Instance instance; private HealthStatusManager healthStatusManager; private io.grpc.Server server; - private boolean stopping = false; private static BuildfarmConfigs configs = BuildfarmConfigs.getInstance(); + private AtomicBoolean shutdownInitiated = new AtomicBoolean(true); + private AtomicBoolean released = new AtomicBoolean(true); - private ShardInstance createInstance() + BuildFarmServer() { + super("BuildFarmServer"); + } + + /** + * Prepares the server for graceful shutdown. The current implementation waits a specified + * period of time to let connections drain; a future improvement could be to track open + * connections and shut down once none are left. Note that by the time this is called during + * shutdown, the log may no longer be available and may not record messages. + */ + public void prepareServerForGracefulShutdown() { + if (configs.getServer().getGracefulShutdownSeconds() == 0) { + log.severe("Graceful Shutdown is not enabled.
Server is shutting down immediately."); } else { + try { + log.info( + String.format( + "Graceful Shutdown - Waiting %d seconds to allow connections to drain.", + configs.getServer().getGracefulShutdownSeconds())); + SECONDS.sleep(configs.getServer().getGracefulShutdownSeconds()); + } catch (InterruptedException e) { + log.severe( + "Graceful Shutdown - The server's graceful shutdown was interrupted: " + e.getMessage()); + } finally { + log.info( + String.format( + "Graceful Shutdown - It took the server %d seconds to shut down", + configs.getServer().getGracefulShutdownSeconds())); + } + } + } + + private ServerInstance createInstance() throws IOException, ConfigurationException, InterruptedException { - return new ShardInstance( + return new ServerInstance( configs.getServer().getName(), configs.getServer().getSession() + "-" + configs.getServer().getName(), new DigestUtil(configs.getDigestFunction()), - this::stop); + this::initiateShutdown); } public synchronized void start(ServerBuilder serverBuilder, String publicName) throws IOException, ConfigurationException, InterruptedException { + shutdownInitiated.set(false); + released.set(false); instance = createInstance(); healthStatusManager = new HealthStatusManager(); @@ -123,7 +149,6 @@ public synchronized void start(ServerBuilder serverBuilder, String publicName .addService(new ExecutionService(instance, keepaliveScheduler)) .addService(new OperationQueueService(instance)) .addService(new OperationsService(instance)) - .addService(new AdminService(instance)) .addService(new FetchService(instance)) .addService(ProtoReflectionService.newInstance()) .addService(new PublishBuildEventService()) @@ -141,9 +166,7 @@ public synchronized void start(ServerBuilder serverBuilder, String publicName log.info(String.format("%s initialized", configs.getServer().getSession())); - checkState(!stopping, "must not call start after stop"); instance.start(publicName); - WebController.setInstance((ShardInstance) instance); server.start(); healthStatusManager.setStatus( @@ -152,38 +175,72 @@ public synchronized void start(ServerBuilder serverBuilder, String publicName healthCheckMetric.labels("start").inc(); } - @PreDestroy - public void stop() { - synchronized (this) { - if (stopping) { - return; - } - stopping = true; + private synchronized void awaitRelease() throws InterruptedException { + while (!released.get()) { + wait(); + } + } + + synchronized void stop() throws InterruptedException { + try { + shutdown(); + } finally { + released.set(true); + notify(); + } + } + + private void shutdown() throws InterruptedException { + log.info("*** shutting down gRPC server since JVM is shutting down"); + prepareServerForGracefulShutdown(); + if (healthStatusManager != null) { + healthStatusManager.setStatus( + HealthStatusManager.SERVICE_NAME_ALL_SERVICES, ServingStatus.NOT_SERVING); } - System.err.println("*** shutting down gRPC server since JVM is shutting down"); - healthStatusManager.setStatus( - HealthStatusManager.SERVICE_NAME_ALL_SERVICES, ServingStatus.NOT_SERVING); PrometheusPublisher.stopHttpServer(); healthCheckMetric.labels("stop").inc(); try { - if (server != null) { - server.shutdown(); - } + initiateShutdown(); instance.stop(); - server.awaitTermination(10, TimeUnit.SECONDS); + if (server != null && server.awaitTermination(10, TimeUnit.SECONDS)) { + server = null; + } } catch (InterruptedException e) { if (server != null) { server.shutdownNow(); + server = null; } + throw e; } if (!shutdownAndAwaitTermination(keepaliveScheduler, 10, TimeUnit.SECONDS)) {
log.warning("could not shut down keepalive scheduler"); } - System.err.println("*** server shut down"); + log.info("*** server shut down"); + } + + @Override + protected void onShutdown() throws InterruptedException { + initiateShutdown(); + awaitRelease(); } - @PostConstruct - public void init() throws OptionsParsingException { + private void initiateShutdown() { + shutdownInitiated.set(true); + if (server != null) { + server.shutdown(); + } + } + + private void awaitTermination() throws InterruptedException { + while (!shutdownInitiated.get()) { + if (server != null && server.awaitTermination(1, TimeUnit.SECONDS)) { + server = null; + shutdownInitiated.set(true); + } + } + } + + public static void main(String[] args) throws Exception { // Only log severe log messages from Netty. Otherwise it logs warnings that look like this: // // 170714 08:16:28.552:WT 18 [io.grpc.netty.NettyServerHandler.onStreamError] Stream Error @@ -191,33 +248,24 @@ public void init() throws OptionsParsingException { // unknown stream 11369 nettyLogger.setLevel(SEVERE); + configs = BuildfarmConfigs.loadServerConfigs(args); + + // Configure Spring + BuildFarmServer server = new BuildFarmServer(); + try { - start( + server.start( ServerBuilder.forPort(configs.getServer().getPort()), configs.getServer().getPublicName()); + server.awaitTermination(); } catch (IOException e) { - System.err.println("error: " + formatIOError(e)); + log.severe("error: " + formatIOError(e)); } catch (InterruptedException e) { - System.err.println("error: interrupted"); - } catch (ConfigurationException e) { - throw new RuntimeException(e); + log.log(WARNING, "interrupted", e); + } catch (Exception e) { + log.log(SEVERE, "Error running application", e); + } finally { + server.stop(); } } - - public static void main(String[] args) throws ConfigurationException { - configs = BuildfarmConfigs.loadServerConfigs(args); - - // Configure Spring - SpringApplication app = new SpringApplication(BuildFarmServer.class); - Map springConfig = new HashMap<>(); - - // Disable Logback - System.setProperty("org.springframework.boot.logging.LoggingSystem", "none"); - - springConfig.put("ui.frontend.enable", configs.getUi().isEnable()); - springConfig.put("server.port", configs.getUi().getPort()); - app.setDefaultProperties(springConfig); - - app.run(args); - } } diff --git a/src/main/java/build/buildfarm/server/controllers/BUILD b/src/main/java/build/buildfarm/server/controllers/BUILD deleted file mode 100644 index 1e523bcef5..0000000000 --- a/src/main/java/build/buildfarm/server/controllers/BUILD +++ /dev/null @@ -1,46 +0,0 @@ -java_library( - name = "WebController", - srcs = ["WebController.java"], - plugins = ["//src/main/java/build/buildfarm/common:lombok"], - visibility = ["//visibility:public"], - deps = [ - "//src/main/java/build/buildfarm/common", - "//src/main/java/build/buildfarm/common/config", - "//src/main/java/build/buildfarm/common/grpc", - "//src/main/java/build/buildfarm/common/resources", - "//src/main/java/build/buildfarm/common/resources:resource_java_proto", - "//src/main/java/build/buildfarm/common/services", - "//src/main/java/build/buildfarm/instance", - "//src/main/java/build/buildfarm/instance/shard", - "//src/main/java/build/buildfarm/metrics/prometheus", - "//src/main/java/build/buildfarm/operations", - "//src/main/java/build/buildfarm/server/services", - "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", - "//src/main/protobuf:build_buildfarm_v1test_buildfarm_proto", - "@googleapis//:google_rpc_code_java_proto", - 
"@googleapis//:google_rpc_error_details_java_proto", - "@googleapis//:google_rpc_error_details_proto", - "@maven//:com_github_pcj_google_options", - "@maven//:com_google_guava_guava", - "@maven//:com_google_protobuf_protobuf_java", - "@maven//:com_google_protobuf_protobuf_java_util", - "@maven//:com_googlecode_json_simple_json_simple", - "@maven//:com_jayway_jsonpath_json_path", - "@maven//:io_grpc_grpc_api", - "@maven//:io_grpc_grpc_core", - "@maven//:io_grpc_grpc_protobuf", - "@maven//:io_grpc_grpc_services", - "@maven//:io_prometheus_simpleclient", - "@maven//:javax_annotation_javax_annotation_api", - "@maven//:org_bouncycastle_bcprov_jdk15on", - "@maven//:org_projectlombok_lombok", - "@maven//:org_springframework_boot_spring_boot", - "@maven//:org_springframework_boot_spring_boot_autoconfigure", - "@maven//:org_springframework_boot_spring_boot_starter_web", - "@maven//:org_springframework_spring_beans", - "@maven//:org_springframework_spring_context", - "@maven//:org_springframework_spring_core", - "@maven//:org_springframework_spring_web", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", - ], -) diff --git a/src/main/java/build/buildfarm/server/controllers/WebController.java b/src/main/java/build/buildfarm/server/controllers/WebController.java deleted file mode 100644 index 5ed5c8250e..0000000000 --- a/src/main/java/build/buildfarm/server/controllers/WebController.java +++ /dev/null @@ -1,291 +0,0 @@ -// Copyright 2023 The Bazel Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package build.buildfarm.server.controllers; - -import build.bazel.remote.execution.v2.ExecuteOperationMetadata; -import build.bazel.remote.execution.v2.ExecuteResponse; -import build.bazel.remote.execution.v2.RequestMetadata; -import build.buildfarm.instance.shard.ShardInstance; -import build.buildfarm.operations.EnrichedOperation; -import build.buildfarm.v1test.CompletedOperationMetadata; -import build.buildfarm.v1test.ExecutingOperationMetadata; -import build.buildfarm.v1test.QueuedOperationMetadata; -import com.google.longrunning.Operation; -import com.google.protobuf.Any; -import com.google.protobuf.Duration; -import com.google.protobuf.InvalidProtocolBufferException; -import com.google.protobuf.Timestamp; -import com.google.protobuf.util.Durations; -import com.google.protobuf.util.JsonFormat; -import com.google.protobuf.util.Timestamps; -import com.google.rpc.PreconditionFailure; -import java.util.Map; -import java.util.Set; -import java.util.logging.Level; -import lombok.extern.java.Log; -import org.json.simple.JSONArray; -import org.json.simple.JSONObject; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.stereotype.Controller; -import org.springframework.ui.Model; -import org.springframework.web.bind.annotation.GetMapping; -import org.springframework.web.bind.annotation.PathVariable; - -// An HTTP frontend for providing a web UI in buildfarm. 
Its primarily used by developers so that -// they can introspect their build inovocations. Standalone tools that communicate over GRPC are -// sometimes less accessible. Nonetheless, the APIs should be similar. -// The web frontend can be deployed as part of the servers. However, the controller may also be -// included in other standalone tools if you'd like the frontend decoupled from the servers. -// This controller can provide web content directly through spring boot via thymeleaf. -// The controller also provides raw REST APIs for those wishing to build out their own frontend. -@Log -@Controller -@ConditionalOnProperty("ui.frontend.enable") -public class WebController { - private static ShardInstance instance; - - // Most of the routes require an instance to have their queries fulfilled. - // I wanted to have the instance set in the controller's constructor, but I was having trouble - // doing that with springboot's bean initialization and auto-wiring. - // Particularly because the controller is constructed before the instance is actually created. - // I settled for having the instance static and allowing the server startup to provide the - // instance once ready. - public static void setInstance(ShardInstance instanceIn) { - instance = instanceIn; - } - - // This is typically the starting for a developer looking to introspect their builds. - // When running a bazel client with `--bes_results_url` they will shown this URL route. - // The page is intented to give them a summary of their build invocation and allow them to drill - // down into more details. - @GetMapping("/invocation/{invocationId}") - public String invocation(Model model, @PathVariable String invocationId) { - // We need to look up the user's operations as fast as possible. Scanning all of the stored - // operations and filtering by invocatio ID (i.e. O(n)) does not scale. Instead, the invocation - // ID must be the primary key to their specific list of build operation IDs. We then lookup - // each - // operation by ID (preferably batched). This is technically two backend calls. It could be - // made faster - // at the expense of duplicate information stored in the backend. - Set<String> operationIDs = instance.findOperationsByInvocationId(invocationId); - Iterable<Map.Entry<String, String>> foundOperations = instance.getOperations(operationIDs); - - // Populate the model for the page. - buildInvocationModel(model, invocationId, foundOperations); - - // Render page. - return "invocation"; - } - - private void buildInvocationModel( - Model model, String invocationId, Iterable<Map.Entry<String, String>> foundOperations) { - // Create an array representing table information about all the oprations involved in the - // invocation. - // This is the core content of the page. - JSONArray operationResults = new JSONArray(); - for (Map.Entry<String, String> entry : foundOperations) { - Operation operation = jsonToOperation(entry.getValue()); - JSONObject obj = new JSONObject(); - obj.put("target", extractTargetId(operation)); - obj.put("mnemonic", extractActionMnemonic(operation)); - obj.put("stage", extractStatus(operation)); - obj.put("duration", extractDuration(operation)); - obj.put("worker", extractWorker(operation)); - - String operationId = entry.getKey(); - String id = operationId.substring(operationId.lastIndexOf('/')).substring(1); - obj.put("operationId", id); - - operationResults.add(obj); - } - - // Populate data to be provided to the frontend.
- model.addAttribute("operation", operationResults.toJSONString()); - model.addAttribute("invocationId", String.format("Invocation: %s", invocationId)); - } - - // An operation represents an executed action. - // The operation has a target ID which corresponds to the bazel label what developers are - // typically thinking of when wishing to evaluate their build. - // This page shows them all of the information that we track related to the operation. - @GetMapping("/operation/{operationId}") - public String operation(Model model, @PathVariable String operationId) { - EnrichedOperation result = - instance.findEnrichedOperation(String.format("shard/operations/%s", operationId)); - model.addAttribute("fullOperation", result.asJsonString()); - return "operation"; - } - - // Information about the current deployment. Useful for verifying what is running. - @GetMapping("/info") - public String info(Model model) { - return "info"; - } - - /** - * @brief Convert string json into operation type. - * @details Parses json and returns null if invalid. - * @param json The json to convert to Operation type. - * @return The created operation. - * @note Suggested return identifier: operation. - */ - private static Operation jsonToOperation(String json) { - // create a json parser - JsonFormat.Parser operationParser = - JsonFormat.parser() - .usingTypeRegistry( - JsonFormat.TypeRegistry.newBuilder() - .add(CompletedOperationMetadata.getDescriptor()) - .add(ExecutingOperationMetadata.getDescriptor()) - .add(ExecuteOperationMetadata.getDescriptor()) - .add(QueuedOperationMetadata.getDescriptor()) - .add(PreconditionFailure.getDescriptor()) - .build()) - .ignoringUnknownFields(); - - if (json == null) { - log.log(Level.WARNING, "Operation Json is empty"); - return null; - } - try { - Operation.Builder operationBuilder = Operation.newBuilder(); - operationParser.merge(json, operationBuilder); - return operationBuilder.build(); - } catch (InvalidProtocolBufferException e) { - log.log(Level.WARNING, "InvalidProtocolBufferException while building an operation.", e); - return null; - } - } - - private String extractTargetId(Operation operation) { - return expectRequestMetadata(operation).getTargetId(); - } - - private String extractActionMnemonic(Operation operation) { - return expectRequestMetadata(operation).getActionMnemonic(); - } - - private String extractStatus(Operation operation) { - return String.valueOf(expectExecuteOperationMetadata(operation).getStage()); - } - - private String extractDuration(Operation operation) { - Any result = operation.getResponse(); - try { - Timestamp start = - result - .unpack(ExecuteResponse.class) - .getResult() - .getExecutionMetadata() - .getWorkerStartTimestamp(); - Timestamp end = - result - .unpack(ExecuteResponse.class) - .getResult() - .getExecutionMetadata() - .getWorkerCompletedTimestamp(); - Duration duration = Timestamps.between(start, end); - return Durations.toSecondsAsDouble(duration) + "s"; - } catch (InvalidProtocolBufferException e) { - System.out.println(e.toString()); - return "Unknown"; - } - } - - private String extractWorker(Operation operation) { - Any result = operation.getResponse(); - try { - return result.unpack(ExecuteResponse.class).getResult().getExecutionMetadata().getWorker(); - } catch (InvalidProtocolBufferException e) { - System.out.println(e.toString()); - return "Unknown"; - } - } - - private static ExecuteOperationMetadata expectExecuteOperationMetadata(Operation operation) { - String name = operation.getName(); - Any metadata = 
operation.getMetadata(); - QueuedOperationMetadata queuedOperationMetadata = maybeQueuedOperationMetadata(name, metadata); - if (queuedOperationMetadata != null) { - return queuedOperationMetadata.getExecuteOperationMetadata(); - } - ExecutingOperationMetadata executingOperationMetadata = - maybeExecutingOperationMetadata(name, metadata); - if (executingOperationMetadata != null) { - return executingOperationMetadata.getExecuteOperationMetadata(); - } - CompletedOperationMetadata completedOperationMetadata = - maybeCompletedOperationMetadata(name, metadata); - if (completedOperationMetadata != null) { - return completedOperationMetadata.getExecuteOperationMetadata(); - } - return ExecuteOperationMetadata.getDefaultInstance(); - } - - private static RequestMetadata expectRequestMetadata(Operation operation) { - String name = operation.getName(); - Any metadata = operation.getMetadata(); - QueuedOperationMetadata queuedOperationMetadata = maybeQueuedOperationMetadata(name, metadata); - if (queuedOperationMetadata != null) { - return queuedOperationMetadata.getRequestMetadata(); - } - ExecutingOperationMetadata executingOperationMetadata = - maybeExecutingOperationMetadata(name, metadata); - if (executingOperationMetadata != null) { - return executingOperationMetadata.getRequestMetadata(); - } - CompletedOperationMetadata completedOperationMetadata = - maybeCompletedOperationMetadata(name, metadata); - if (completedOperationMetadata != null) { - return completedOperationMetadata.getRequestMetadata(); - } - return RequestMetadata.getDefaultInstance(); - } - - private static QueuedOperationMetadata maybeQueuedOperationMetadata(String name, Any metadata) { - if (metadata.is(QueuedOperationMetadata.class)) { - try { - return metadata.unpack(QueuedOperationMetadata.class); - } catch (InvalidProtocolBufferException e) { - log.log(Level.SEVERE, String.format("invalid executing operation metadata %s", name), e); - } - } - return null; - } - - private static ExecutingOperationMetadata maybeExecutingOperationMetadata( - String name, Any metadata) { - if (metadata.is(ExecutingOperationMetadata.class)) { - try { - return metadata.unpack(ExecutingOperationMetadata.class); - } catch (InvalidProtocolBufferException e) { - log.log(Level.SEVERE, String.format("invalid executing operation metadata %s", name), e); - } - } - return null; - } - - private static CompletedOperationMetadata maybeCompletedOperationMetadata( - String name, Any metadata) { - if (metadata.is(CompletedOperationMetadata.class)) { - try { - return metadata.unpack(CompletedOperationMetadata.class); - } catch (InvalidProtocolBufferException e) { - log.log(Level.SEVERE, String.format("invalid completed operation metadata %s", name), e); - } - } - return null; - } -} diff --git a/src/main/java/build/buildfarm/server/services/AdminService.java b/src/main/java/build/buildfarm/server/services/AdminService.java deleted file mode 100644 index 968edc1572..0000000000 --- a/src/main/java/build/buildfarm/server/services/AdminService.java +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright 2020 The Bazel Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package build.buildfarm.server.services; - -import build.buildfarm.admin.Admin; -import build.buildfarm.admin.aws.AwsAdmin; -import build.buildfarm.admin.gcp.GcpAdmin; -import build.buildfarm.common.CasIndexResults; -import build.buildfarm.common.config.BuildfarmConfigs; -import build.buildfarm.instance.Instance; -import build.buildfarm.v1test.AdminGrpc; -import build.buildfarm.v1test.DisableScaleInProtectionRequest; -import build.buildfarm.v1test.DisableScaleInProtectionRequestResults; -import build.buildfarm.v1test.GetClientStartTimeRequest; -import build.buildfarm.v1test.GetClientStartTimeResult; -import build.buildfarm.v1test.GetHostsRequest; -import build.buildfarm.v1test.GetHostsResult; -import build.buildfarm.v1test.PrepareWorkerForGracefulShutDownRequest; -import build.buildfarm.v1test.ReindexCasRequest; -import build.buildfarm.v1test.ReindexCasRequestResults; -import build.buildfarm.v1test.ScaleClusterRequest; -import build.buildfarm.v1test.ShutDownWorkerGracefullyRequest; -import build.buildfarm.v1test.ShutDownWorkerGracefullyRequestResults; -import build.buildfarm.v1test.ShutDownWorkerGrpc; -import build.buildfarm.v1test.StopContainerRequest; -import build.buildfarm.v1test.TerminateHostRequest; -import com.google.rpc.Code; -import com.google.rpc.Status; -import io.grpc.ManagedChannel; -import io.grpc.netty.NegotiationType; -import io.grpc.netty.NettyChannelBuilder; -import io.grpc.stub.StreamObserver; -import java.util.logging.Level; -import lombok.extern.java.Log; - -@Log -public class AdminService extends AdminGrpc.AdminImplBase { - private final Admin adminController; - private final Instance instance; - - private static BuildfarmConfigs configs = BuildfarmConfigs.getInstance(); - - public AdminService(Instance instance) { - this.adminController = getAdminController(); - this.instance = instance; - } - - @Override - public void terminateHost(TerminateHostRequest request, StreamObserver responseObserver) { - try { - if (adminController != null) { - adminController.terminateHost(request.getHostId()); - } - responseObserver.onNext(Status.newBuilder().setCode(Code.OK_VALUE).build()); - responseObserver.onCompleted(); - } catch (Exception e) { - log.log(Level.SEVERE, "Could not terminate host.", e); - responseObserver.onError(io.grpc.Status.fromThrowable(e).asException()); - } - } - - @Override - public void stopContainer(StopContainerRequest request, StreamObserver responseObserver) { - try { - if (adminController != null) { - adminController.stopContainer(request.getHostId(), request.getContainerName()); - } - responseObserver.onNext(Status.newBuilder().setCode(Code.OK_VALUE).build()); - responseObserver.onCompleted(); - } catch (Exception e) { - log.log(Level.SEVERE, "Could not stop container.", e); - responseObserver.onError(io.grpc.Status.fromThrowable(e).asException()); - } - } - - @Override - public void getHosts(GetHostsRequest request, StreamObserver responseObserver) { - try { - GetHostsResult result = null; - if (adminController != null) { - result = - adminController.getHosts( - request.getFilter(), request.getAgeInMinutes(), request.getStatus()); - 
} - responseObserver.onNext(result); - responseObserver.onCompleted(); - } catch (Exception e) { - log.log(Level.SEVERE, "Could not get hosts.", e); - responseObserver.onError(io.grpc.Status.fromThrowable(e).asException()); - } - } - - @Override - public void getClientStartTime( - GetClientStartTimeRequest request, - StreamObserver responseObserver) { - try { - GetClientStartTimeResult result = instance.getClientStartTime(request); - responseObserver.onNext(result); - responseObserver.onCompleted(); - } catch (Exception e) { - log.log( - Level.SEVERE, - String.format("Could not get client start time for %s.", request.getInstanceName()), - e); - responseObserver.onError(io.grpc.Status.fromThrowable(e).asException()); - } - } - - @Override - public void scaleCluster(ScaleClusterRequest request, StreamObserver responseObserver) { - try { - if (adminController != null) { - adminController.scaleCluster( - request.getScaleGroupName(), - request.getMinHosts(), - request.getMaxHosts(), - request.getTargetHosts(), - request.getTargetReservedHostsPercent()); - } - responseObserver.onNext(Status.newBuilder().setCode(Code.OK_VALUE).build()); - responseObserver.onCompleted(); - } catch (Exception e) { - log.log(Level.SEVERE, "Could not scale cluster.", e); - responseObserver.onError(io.grpc.Status.fromThrowable(e).asException()); - } - } - - @Override - public void reindexCas( - ReindexCasRequest request, StreamObserver responseObserver) { - try { - CasIndexResults results = instance.reindexCas(); - log.info(String.format("CAS Indexer Results: %s", results.toMessage())); - responseObserver.onNext( - ReindexCasRequestResults.newBuilder() - .setRemovedHosts(results.removedHosts) - .setRemovedKeys(results.removedKeys) - .setTotalKeys(results.totalKeys) - .build()); - responseObserver.onCompleted(); - } catch (Exception e) { - log.log(Level.SEVERE, "Could not reindex CAS.", e); - responseObserver.onError(io.grpc.Status.fromThrowable(e).asException()); - } - } - - /** - * Server-side implementation of ShutDownWorkerGracefully. This will reroute the request to target - * worker. - * - * @param request ShutDownWorkerGracefullyRequest received through grpc - * @param responseObserver grpc response observer - */ - @Override - public void shutDownWorkerGracefully( - ShutDownWorkerGracefullyRequest request, - StreamObserver responseObserver) { - try { - informWorkerToPrepareForShutdown(request.getWorkerName()); - responseObserver.onNext(ShutDownWorkerGracefullyRequestResults.newBuilder().build()); - responseObserver.onCompleted(); - } catch (Exception e) { - String errorMessage = - String.format( - "Could not inform the worker %s to prepare for graceful shutdown with error %s.", - request.getWorkerName(), e.getMessage()); - log.log(Level.SEVERE, errorMessage); - responseObserver.onError(new Exception(errorMessage)); - } - } - - /** - * Inform a worker to prepare for graceful shutdown. - * - * @param host the host that should be prepared for shutdown. 
- */ - @SuppressWarnings("ResultOfMethodCallIgnored") - private void informWorkerToPrepareForShutdown(String host) { - ManagedChannel channel = null; - try { - NettyChannelBuilder builder = - NettyChannelBuilder.forTarget(host).negotiationType(NegotiationType.PLAINTEXT); - channel = builder.build(); - ShutDownWorkerGrpc.ShutDownWorkerBlockingStub shutDownWorkerBlockingStub = - ShutDownWorkerGrpc.newBlockingStub(channel); - shutDownWorkerBlockingStub.prepareWorkerForGracefulShutdown( - PrepareWorkerForGracefulShutDownRequest.newBuilder().build()); - } finally { - if (channel != null) { - channel.shutdown(); - } - } - } - - /** - * Server-side implementation of disableScaleInProtection. - * - * @param request grpc request - * @param responseObserver grpc response observer - */ - @Override - public void disableScaleInProtection( - DisableScaleInProtectionRequest request, - StreamObserver responseObserver) { - try { - String hostPrivateIp = trimHostPrivateDns(request.getInstanceName()); - adminController.disableHostScaleInProtection(hostPrivateIp); - responseObserver.onNext(DisableScaleInProtectionRequestResults.newBuilder().build()); - responseObserver.onCompleted(); - } catch (RuntimeException e) { - responseObserver.onError(e); - } - } - - /** - * The private dns get from worker might be suffixed with ":portNumber", which should be trimmed. - * - * @param hostPrivateIp the private dns should be trimmed. - * @return - */ - @SuppressWarnings("JavaDoc") - private String trimHostPrivateDns(String hostPrivateIp) { - String portSeparator = ":"; - if (hostPrivateIp.contains(portSeparator)) { - hostPrivateIp = hostPrivateIp.split(portSeparator)[0]; - } - return hostPrivateIp; - } - - private static Admin getAdminController() { - if (configs.getServer().getAdmin().getDeploymentEnvironment() == null) { - return null; - } - switch (configs.getServer().getAdmin().getDeploymentEnvironment()) { - default: - return null; - case AWS: - return new AwsAdmin(); - case GCP: - return new GcpAdmin(); - } - } -} diff --git a/src/main/java/build/buildfarm/server/services/BUILD b/src/main/java/build/buildfarm/server/services/BUILD index aff642ed7a..d210928fe7 100644 --- a/src/main/java/build/buildfarm/server/services/BUILD +++ b/src/main/java/build/buildfarm/server/services/BUILD @@ -4,41 +4,34 @@ java_library( plugins = ["//src/main/java/build/buildfarm/common:lombok"], visibility = ["//visibility:public"], deps = [ - "//src/main/java/build/buildfarm/admin", - "//src/main/java/build/buildfarm/admin/aws", - "//src/main/java/build/buildfarm/admin/gcp", "//src/main/java/build/buildfarm/common", "//src/main/java/build/buildfarm/common/config", "//src/main/java/build/buildfarm/common/grpc", "//src/main/java/build/buildfarm/common/resources:resource_java_proto", "//src/main/java/build/buildfarm/instance", "//src/main/java/build/buildfarm/metrics", - "//src/main/java/build/buildfarm/metrics/aws", - "//src/main/java/build/buildfarm/metrics/gcp", "//src/main/java/build/buildfarm/metrics/log", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_grpc", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", - "@googleapis//:google_bytestream_bytestream_java_proto", - "@googleapis//:google_devtools_build_v1_build_events_java_proto", - "@googleapis//:google_devtools_build_v1_publish_build_event_java_grpc", - "@googleapis//:google_devtools_build_v1_publish_build_event_java_proto", - "@googleapis//:google_longrunning_operations_java_grpc", - "@googleapis//:google_rpc_code_java_proto", + 
"//third_party/remote-apis:build_bazel_remote_asset_v1_remote_asset_java_grpc", + "//third_party/remote-apis:build_bazel_remote_execution_v2_remote_execution_java_grpc", + "@com_google_googleapis//google/bytestream:bytestream_java_proto", + "@com_google_googleapis//google/devtools/build/v1:build_java_grpc", + "@com_google_googleapis//google/devtools/build/v1:build_java_proto", + "@com_google_googleapis//google/longrunning:longrunning_java_grpc", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_google_code_findbugs_jsr305", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_context", "@maven//:io_grpc_grpc_core", - "@maven//:io_grpc_grpc_netty", "@maven//:io_grpc_grpc_protobuf", "@maven//:io_grpc_grpc_services", "@maven//:io_grpc_grpc_stub", "@maven//:io_prometheus_simpleclient", "@maven//:org_projectlombok_lombok", - "@remote_apis//:build_bazel_remote_asset_v1_remote_asset_java_grpc", - "@remote_apis//:build_bazel_remote_asset_v1_remote_asset_java_proto", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_grpc", - "@remote_apis//:build_bazel_semver_java_proto", + "@remoteapis//build/bazel/remote/asset/v1:remote_asset_java_proto", + "@remoteapis//build/bazel/semver:semver_java_proto", ], ) diff --git a/src/main/java/build/buildfarm/server/services/CapabilitiesService.java b/src/main/java/build/buildfarm/server/services/CapabilitiesService.java index d97ec2b3fa..ffb5a8aa51 100644 --- a/src/main/java/build/buildfarm/server/services/CapabilitiesService.java +++ b/src/main/java/build/buildfarm/server/services/CapabilitiesService.java @@ -38,9 +38,7 @@ public void getCapabilities( GetCapabilitiesRequest request, StreamObserver responseObserver) { numberOfRemoteInvocations.inc(); responseObserver.onNext( - instance - .getCapabilities() - .toBuilder() + instance.getCapabilities().toBuilder() .setLowApiVersion(SemVer.newBuilder().setMajor(2)) .setHighApiVersion(SemVer.newBuilder().setMajor(2)) .build()); diff --git a/src/main/java/build/buildfarm/server/services/ExecutionService.java b/src/main/java/build/buildfarm/server/services/ExecutionService.java index 7f46c0bb3c..69799524fb 100644 --- a/src/main/java/build/buildfarm/server/services/ExecutionService.java +++ b/src/main/java/build/buildfarm/server/services/ExecutionService.java @@ -28,8 +28,6 @@ import build.buildfarm.common.grpc.TracingMetadataUtils; import build.buildfarm.instance.Instance; import build.buildfarm.metrics.MetricsPublisher; -import build.buildfarm.metrics.aws.AwsMetricsPublisher; -import build.buildfarm.metrics.gcp.GcpMetricsPublisher; import build.buildfarm.metrics.log.LogMetricsPublisher; import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.ListenableFuture; @@ -205,13 +203,6 @@ public void execute(ExecuteRequest request, StreamObserver responseOb } private static MetricsPublisher getMetricsPublisher() { - switch (configs.getServer().getMetrics().getPublisher()) { - default: - return new LogMetricsPublisher(); - case AWS: - return new AwsMetricsPublisher(); - case GCP: - return new GcpMetricsPublisher(); - } + return new LogMetricsPublisher(); } } diff --git a/src/main/java/build/buildfarm/server/services/FetchService.java b/src/main/java/build/buildfarm/server/services/FetchService.java index 35be1784e2..17a33c68ca 100644 --- a/src/main/java/build/buildfarm/server/services/FetchService.java +++ b/src/main/java/build/buildfarm/server/services/FetchService.java 
@@ -72,7 +72,7 @@ private void fetchBlob( if (expectedDigest == null) { responseObserver.onError( Status.INVALID_ARGUMENT - .withDescription(format("Missing qualifier 'checksum.sri'")) + .withDescription("Missing qualifier 'checksum.sri'") .asException()); } else if (request.getUrisCount() != 0) { addCallback( diff --git a/src/main/java/build/buildfarm/server/services/OperationQueueService.java b/src/main/java/build/buildfarm/server/services/OperationQueueService.java index 6d3edb8178..11f6501667 100644 --- a/src/main/java/build/buildfarm/server/services/OperationQueueService.java +++ b/src/main/java/build/buildfarm/server/services/OperationQueueService.java @@ -28,9 +28,7 @@ import com.google.rpc.Code; import io.grpc.Status; import io.grpc.StatusRuntimeException; -import io.grpc.stub.ServerCallStreamObserver; import io.grpc.stub.StreamObserver; -import java.util.function.Consumer; public class OperationQueueService extends OperationQueueGrpc.OperationQueueImplBase { private final Instance instance; @@ -43,14 +41,11 @@ private static class OperationQueueMatchListener implements MatchListener { @SuppressWarnings("rawtypes") private final InterruptingPredicate onMatch; - private final Consumer setOnCancelHandler; private static final QueueEntry queueEntry = null; @SuppressWarnings("rawtypes") - OperationQueueMatchListener( - InterruptingPredicate onMatch, Consumer setOnCancelHandler) { + OperationQueueMatchListener(InterruptingPredicate onMatch) { this.onMatch = onMatch; - this.setOnCancelHandler = setOnCancelHandler; } @Override @@ -70,11 +65,6 @@ public void onError(Throwable t) { Throwables.throwIfUnchecked(t); throw new RuntimeException(t); } - - @Override - public void setOnCancelHandler(Runnable onCancelHandler) { - setOnCancelHandler.accept(onCancelHandler); - } } private InterruptingPredicate createOnMatch( @@ -97,14 +87,10 @@ private InterruptingPredicate createOnMatch( @Override public void take(TakeOperationRequest request, StreamObserver responseObserver) { - ServerCallStreamObserver callObserver = - (ServerCallStreamObserver) responseObserver; - try { instance.match( request.getPlatform(), - new OperationQueueMatchListener( - createOnMatch(instance, responseObserver), callObserver::setOnCancelHandler)); + new OperationQueueMatchListener(createOnMatch(instance, responseObserver))); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } diff --git a/src/main/java/build/buildfarm/tools/Ac.java b/src/main/java/build/buildfarm/tools/Ac.java index adfca598ff..9515fe0ec4 100644 --- a/src/main/java/build/buildfarm/tools/Ac.java +++ b/src/main/java/build/buildfarm/tools/Ac.java @@ -14,6 +14,8 @@ package build.buildfarm.tools; +import static build.buildfarm.common.grpc.Channels.createChannel; + import build.bazel.remote.execution.v2.ActionResult; import build.buildfarm.common.DigestUtil; import build.buildfarm.common.DigestUtil.HashFunction; @@ -21,18 +23,10 @@ import build.buildfarm.instance.stub.StubInstance; import com.google.protobuf.ByteString; import io.grpc.ManagedChannel; -import io.grpc.netty.NegotiationType; -import io.grpc.netty.NettyChannelBuilder; // This tool can be used to interact directly with the Action Cache API. 
// ./tool shard SHA256 class Ac { - private static ManagedChannel createChannel(String target) { - NettyChannelBuilder builder = - NettyChannelBuilder.forTarget(target).negotiationType(NegotiationType.PLAINTEXT); - return builder.build(); - } - public static void main(String[] args) throws Exception { // get arguments for establishing an instance String host = args[0]; diff --git a/src/main/java/build/buildfarm/tools/BUILD b/src/main/java/build/buildfarm/tools/BUILD index c2f9d94ece..60ea837540 100644 --- a/src/main/java/build/buildfarm/tools/BUILD +++ b/src/main/java/build/buildfarm/tools/BUILD @@ -5,17 +5,17 @@ java_binary( visibility = ["//visibility:public"], deps = [ "//src/main/java/build/buildfarm/common", + "//src/main/java/build/buildfarm/common/grpc", "//src/main/java/build/buildfarm/instance", "//src/main/java/build/buildfarm/instance/stub", "//src/main/java/build/buildfarm/worker", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", - "@googleapis//:google_rpc_code_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_context", "@maven//:io_grpc_grpc_core", - "@maven//:io_grpc_grpc_netty", "@maven//:io_grpc_grpc_protobuf", "@maven//:io_grpc_grpc_stub", ], @@ -37,7 +37,7 @@ java_binary( "@maven//:com_github_jnr_jnr_ffi", "@maven//:com_github_jnr_jnr_posix", "@maven//:com_google_guava_guava", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) @@ -57,21 +57,22 @@ java_binary( visibility = ["//visibility:public"], deps = [ "//src/main/java/build/buildfarm/common", + "//src/main/java/build/buildfarm/common/grpc", "//src/main/java/build/buildfarm/instance/stub", - "@googleapis//:google_bytestream_bytestream_java_grpc", - "@googleapis//:google_bytestream_bytestream_java_proto", - "@googleapis//:google_rpc_code_java_proto", - "@googleapis//:google_rpc_status_java_proto", + "//third_party/remote-apis:build_bazel_remote_execution_v2_remote_execution_java_grpc", + "@com_google_googleapis//google/bytestream:bytestream_java_grpc", + "@com_google_googleapis//google/bytestream:bytestream_java_proto", + "@com_google_googleapis//google/longrunning:longrunning_java_proto", + "@com_google_googleapis//google/longrunning:operations_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_context", "@maven//:io_grpc_grpc_core", - "@maven//:io_grpc_grpc_netty", "@maven//:io_grpc_grpc_protobuf", "@maven//:io_grpc_grpc_stub", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_grpc", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) @@ -83,17 +84,16 @@ java_binary( deps = [ "//src/main/java/build/buildfarm/common", "//src/main/java/build/buildfarm/common/grpc", - "@googleapis//:google_bytestream_bytestream_java_grpc", - "@googleapis//:google_bytestream_bytestream_java_proto", + "@com_google_googleapis//google/bytestream:bytestream_java_grpc", + "@com_google_googleapis//google/bytestream:bytestream_java_proto", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_context", 
"@maven//:io_grpc_grpc_core", - "@maven//:io_grpc_grpc_netty", "@maven//:io_grpc_grpc_protobuf", "@maven//:io_grpc_grpc_stub", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) @@ -108,18 +108,17 @@ java_binary( deps = [ ":worker-profiler-printer", "//src/main/java/build/buildfarm/common", + "//src/main/java/build/buildfarm/common/grpc", "//src/main/java/build/buildfarm/instance", "//src/main/java/build/buildfarm/instance/stub", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", - "@googleapis//:google_rpc_code_java_proto", - "@googleapis//:google_rpc_error_details_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", "@maven//:com_google_protobuf_protobuf_java_util", "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_context", "@maven//:io_grpc_grpc_core", - "@maven//:io_grpc_grpc_netty", "@maven//:io_grpc_grpc_protobuf", "@maven//:io_grpc_grpc_stub", ], @@ -137,13 +136,13 @@ java_binary( ":worker-profiler-printer", "//src/main/java/build/buildfarm/common", "//src/main/java/build/buildfarm/common/config", + "//src/main/java/build/buildfarm/common/grpc", "//src/main/java/build/buildfarm/common/redis", "//src/main/java/build/buildfarm/instance", "//src/main/java/build/buildfarm/instance/shard", "//src/main/java/build/buildfarm/instance/stub", "//src/main/java/build/buildfarm/worker/shard", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", - "//third_party/jedis", "@maven//:com_github_pcj_google_options", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", @@ -151,9 +150,9 @@ java_binary( "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_context", "@maven//:io_grpc_grpc_core", - "@maven//:io_grpc_grpc_netty", "@maven//:io_grpc_grpc_protobuf", "@maven//:io_grpc_grpc_stub", + "@maven//:redis_clients_jedis", ], ) @@ -168,33 +167,32 @@ java_binary( deps = [ ":worker-profiler-printer", "//src/main/java/build/buildfarm/common", + "//src/main/java/build/buildfarm/common/grpc", "//src/main/java/build/buildfarm/instance", "//src/main/java/build/buildfarm/instance/stub", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", - "@googleapis//:google_rpc_code_java_proto", - "@googleapis//:google_rpc_error_details_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", "@maven//:com_google_protobuf_protobuf_java_util", "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_context", "@maven//:io_grpc_grpc_core", - "@maven//:io_grpc_grpc_netty", "@maven//:io_grpc_grpc_protobuf", "@maven//:io_grpc_grpc_stub", ], ) java_binary( - name = "GracefulShutdownTest", - srcs = ["GracefulShutdownTest.java"], - main_class = "build.buildfarm.tools.GracefulShutdownTest", + name = "GracefulShutdown", + srcs = ["GracefulShutdown.java"], + main_class = "build.buildfarm.tools.GracefulShutdown", visibility = ["//visibility:public"], deps = [ + "//src/main/java/build/buildfarm/common/grpc", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_grpc", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", "@maven//:io_grpc_grpc_api", - "@maven//:io_grpc_grpc_netty", ], ) @@ -205,19 +203,19 @@ java_binary( visibility = ["//visibility:public"], deps = [ "//src/main/java/build/buildfarm/common", + "//src/main/java/build/buildfarm/common/grpc", 
"//src/main/java/build/buildfarm/instance", "//src/main/java/build/buildfarm/instance/stub", - "@googleapis//:google_longrunning_operations_java_proto", - "@googleapis//:google_rpc_code_java_proto", + "@com_google_googleapis//google/longrunning:longrunning_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_context", "@maven//:io_grpc_grpc_core", - "@maven//:io_grpc_grpc_netty", "@maven//:io_grpc_grpc_protobuf", "@maven//:io_grpc_grpc_stub", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) @@ -228,19 +226,19 @@ java_binary( visibility = ["//visibility:public"], deps = [ "//src/main/java/build/buildfarm/common", + "//src/main/java/build/buildfarm/common/grpc", "//src/main/java/build/buildfarm/instance", "//src/main/java/build/buildfarm/instance/stub", - "@googleapis//:google_longrunning_operations_java_proto", - "@googleapis//:google_rpc_code_java_proto", + "@com_google_googleapis//google/longrunning:longrunning_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_context", "@maven//:io_grpc_grpc_core", - "@maven//:io_grpc_grpc_netty", "@maven//:io_grpc_grpc_protobuf", "@maven//:io_grpc_grpc_stub", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) @@ -251,19 +249,19 @@ java_binary( visibility = ["//visibility:public"], deps = [ "//src/main/java/build/buildfarm/common", + "//src/main/java/build/buildfarm/common/grpc", "//src/main/java/build/buildfarm/instance", "//src/main/java/build/buildfarm/instance/stub", - "@googleapis//:google_longrunning_operations_java_proto", - "@googleapis//:google_rpc_code_java_proto", + "@com_google_googleapis//google/longrunning:longrunning_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_context", "@maven//:io_grpc_grpc_core", - "@maven//:io_grpc_grpc_netty", "@maven//:io_grpc_grpc_protobuf", "@maven//:io_grpc_grpc_stub", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) @@ -281,12 +279,12 @@ java_binary( visibility = ["//visibility:public"], deps = [ "//src/main/java/build/buildfarm/common", + "//src/main/java/build/buildfarm/common/grpc", "//src/main/java/build/buildfarm/instance", "//src/main/java/build/buildfarm/instance/stub", "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_context", "@maven//:io_grpc_grpc_core", - "@maven//:io_grpc_grpc_netty", "@maven//:io_grpc_grpc_protobuf", "@maven//:io_grpc_grpc_stub", ], @@ -305,7 +303,6 @@ java_library( "//src/main/java/build/buildfarm/instance/stub", "//src/main/java/build/buildfarm/worker/shard", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", - "//third_party/jedis", "@maven//:com_github_pcj_google_options", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", @@ -313,8 +310,8 @@ java_library( "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_context", "@maven//:io_grpc_grpc_core", - 
"@maven//:io_grpc_grpc_netty", "@maven//:io_grpc_grpc_protobuf", "@maven//:io_grpc_grpc_stub", + "@maven//:redis_clients_jedis", ], ) diff --git a/src/main/java/build/buildfarm/tools/CacheLoad.java b/src/main/java/build/buildfarm/tools/CacheLoad.java index 1694f32da9..7686ae63da 100644 --- a/src/main/java/build/buildfarm/tools/CacheLoad.java +++ b/src/main/java/build/buildfarm/tools/CacheLoad.java @@ -67,10 +67,10 @@ public static void main(String[] args) throws Exception { new LocalCASFileCache( root, configs.getWorker().getStorages().get(0), - /* maxSizeInBytes=*/ Size.gbToBytes(500), + /* maxSizeInBytes= */ Size.gbToBytes(500), new DigestUtil(HashFunction.SHA1), - /* expireService=*/ newDirectExecutorService(), - /* accessRecorder=*/ directExecutor()); + /* expireService= */ newDirectExecutorService(), + /* accessRecorder= */ directExecutor()); // Start cache and measure startup time (reported internally). StartupCacheResults results = fileCache.start(newDirectExecutorService(), true); diff --git a/src/main/java/build/buildfarm/tools/Cancel.java b/src/main/java/build/buildfarm/tools/Cancel.java index 5945df708e..24805034dc 100644 --- a/src/main/java/build/buildfarm/tools/Cancel.java +++ b/src/main/java/build/buildfarm/tools/Cancel.java @@ -14,20 +14,14 @@ package build.buildfarm.tools; +import static build.buildfarm.common.grpc.Channels.createChannel; + import build.buildfarm.common.DigestUtil; import build.buildfarm.instance.Instance; import build.buildfarm.instance.stub.StubInstance; import io.grpc.ManagedChannel; -import io.grpc.netty.NegotiationType; -import io.grpc.netty.NettyChannelBuilder; class Cancel { - private static ManagedChannel createChannel(String target) { - NettyChannelBuilder builder = - NettyChannelBuilder.forTarget(target).negotiationType(NegotiationType.PLAINTEXT); - return builder.build(); - } - public static void main(String[] args) throws Exception { String host = args[0]; String instanceName = args[1]; diff --git a/src/main/java/build/buildfarm/tools/Cat.java b/src/main/java/build/buildfarm/tools/Cat.java index b8008e434a..06fdf92b82 100644 --- a/src/main/java/build/buildfarm/tools/Cat.java +++ b/src/main/java/build/buildfarm/tools/Cat.java @@ -14,6 +14,7 @@ package build.buildfarm.tools; +import static build.buildfarm.common.grpc.Channels.createChannel; import static build.buildfarm.instance.Utils.getBlob; import static com.google.common.util.concurrent.MoreExecutors.shutdownAndAwaitTermination; import static java.lang.String.format; @@ -68,8 +69,6 @@ import io.grpc.Context; import io.grpc.ManagedChannel; import io.grpc.Status; -import io.grpc.netty.NegotiationType; -import io.grpc.netty.NettyChannelBuilder; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -86,12 +85,6 @@ import java.util.stream.StreamSupport; class Cat { - private static ManagedChannel createChannel(String target) { - NettyChannelBuilder builder = - NettyChannelBuilder.forTarget(target).negotiationType(NegotiationType.PLAINTEXT); - return builder.build(); - } - private static void printCapabilities(ServerCapabilities capabilities) { System.out.println(capabilities); } @@ -108,8 +101,7 @@ private static void printAction(ByteString actionBlob) { } private static void printAction(int level, Action action) { - indentOut( - level + 1, "Command Digest: Command " + DigestUtil.toString(action.getCommandDigest())); + indentOut(level, "Command Digest: Command " + DigestUtil.toString(action.getCommandDigest())); indentOut( level, "Input Root Digest: Directory " + 
DigestUtil.toString(action.getInputRootDigest())); indentOut(level, "DoNotCache: " + (action.getDoNotCache() ? "true" : "false")); @@ -120,6 +112,8 @@ private static void printAction(int level, Action action) { + (action.getTimeout().getSeconds() + action.getTimeout().getNanos() / 1e9) + "s"); } + indentOut(level, "Salt: " + action.getSalt()); + indentOut(level, "Platform: " + action.getPlatform()); } private static void printCommand(ByteString commandBlob) { @@ -483,6 +477,9 @@ private static void printRequestMetadata(RequestMetadata metadata) { System.out.println("ActionId: " + metadata.getActionId()); System.out.println("ToolInvocationId: " + metadata.getToolInvocationId()); System.out.println("CorrelatedInvocationsId: " + metadata.getCorrelatedInvocationsId()); + System.out.println("ActionMnemonic: " + metadata.getActionMnemonic()); + System.out.println("TargetId: " + metadata.getTargetId()); + System.out.println("ConfigurationId: " + metadata.getConfigurationId()); } private static void printStatus(com.google.rpc.Status status) @@ -654,6 +651,9 @@ private static void getWorkerProfile(Instance instance) { private static void printStageInformation(StageInformation stage) { System.out.printf("%s slots configured: %d%n", stage.getName(), stage.getSlotsConfigured()); System.out.printf("%s slots used %d%n", stage.getName(), stage.getSlotsUsed()); + for (String operationName : stage.getOperationNamesList()) { + System.out.printf("%s operation %s\n", stage.getName(), operationName); + } } private static void printOperationTime(OperationTimesBetweenStages time) { @@ -929,7 +929,8 @@ protected void run(Instance instance, Digest digest) throws Exception { static class TreeLayout extends DigestsCommand { @Override public String description() { - return "Rich tree layout of root directory [digests...], with weighting and missing tolerance"; + return "Rich tree layout of root directory [digests...], with weighting and missing" + + " tolerance"; } @Override @@ -1009,7 +1010,8 @@ protected void run(Instance instance, ByteString blob) { static class DumpQueuedOperation extends BlobCommand { @Override public String description() { - return "Binary QueuedOperation [digests...] content, suitable for retention in local 'blobs' directory and use with bf-executor"; + return "Binary QueuedOperation [digests...] 
content, suitable for retention in local 'blobs'" + + " directory and use with bf-executor"; } @Override diff --git a/src/main/java/build/buildfarm/tools/Executor.java b/src/main/java/build/buildfarm/tools/Executor.java index 77ca4002c1..4d18a28084 100644 --- a/src/main/java/build/buildfarm/tools/Executor.java +++ b/src/main/java/build/buildfarm/tools/Executor.java @@ -15,6 +15,7 @@ package build.buildfarm.tools; import static build.bazel.remote.execution.v2.ExecutionStage.Value.EXECUTING; +import static build.buildfarm.common.grpc.Channels.createChannel; import static build.buildfarm.common.io.Utils.stat; import static build.buildfarm.instance.stub.ByteStreamUploader.uploadResourceName; import static com.google.common.base.Preconditions.checkState; @@ -53,8 +54,6 @@ import com.google.rpc.Code; import io.grpc.Channel; import io.grpc.ManagedChannel; -import io.grpc.netty.NegotiationType; -import io.grpc.netty.NettyChannelBuilder; import io.grpc.stub.StreamObserver; import java.io.IOException; import java.io.InputStream; @@ -223,12 +222,6 @@ static void executeActions( shutdownAndAwaitTermination(service, 1, SECONDS); } - private static ManagedChannel createChannel(String target) { - NettyChannelBuilder builder = - NettyChannelBuilder.forTarget(target).negotiationType(NegotiationType.PLAINTEXT); - return builder.build(); - } - private static void loadFilesIntoCAS(String instanceName, Channel channel, Path blobsDir) throws Exception { ContentAddressableStorageBlockingStub casStub = @@ -245,7 +238,7 @@ private static void loadFilesIntoCAS(String instanceName, Channel channel, Path ByteStreamStub bsStub = ByteStreamGrpc.newStub(channel); for (Digest missingDigest : missingDigests) { - Path path = blobsDir.resolve(missingDigest.getHash() + "_" + missingDigest.getSizeBytes()); + Path path = blobsDir.resolve(missingDigest.getHash()); if (missingDigest.getSizeBytes() < Size.mbToBytes(1)) { Request request = Request.newBuilder() @@ -376,7 +369,7 @@ private static List findMissingBlobs( FileStore fileStore = Files.getFileStore(blobsDir); try (DirectoryStream stream = Files.newDirectoryStream(blobsDir)) { for (Path file : stream) { - FileStatus stat = stat(file, /* followSymlinks=*/ false, fileStore); + FileStatus stat = stat(file, /* followSymlinks= */ false, fileStore); Digest digest = DigestUtil.buildDigest(file.getFileName().toString().split("_")[0], stat.getSize()); diff --git a/src/main/java/build/buildfarm/tools/Extract.java b/src/main/java/build/buildfarm/tools/Extract.java index e4de193ae2..fed81ac267 100644 --- a/src/main/java/build/buildfarm/tools/Extract.java +++ b/src/main/java/build/buildfarm/tools/Extract.java @@ -14,6 +14,7 @@ package build.buildfarm.tools; +import static build.buildfarm.common.grpc.Channels.createChannel; import static com.google.common.util.concurrent.MoreExecutors.listeningDecorator; import static java.util.concurrent.Executors.newSingleThreadExecutor; import static java.util.concurrent.Executors.newSingleThreadScheduledExecutor; @@ -41,8 +42,6 @@ import io.grpc.ManagedChannel; import io.grpc.Status; import io.grpc.Status.Code; -import io.grpc.netty.NegotiationType; -import io.grpc.netty.NettyChannelBuilder; import io.grpc.stub.StreamObserver; import java.io.IOException; import java.io.InputStream; @@ -61,12 +60,6 @@ import java.util.concurrent.atomic.AtomicLong; class Extract { - static ManagedChannel createChannel(String target) { - NettyChannelBuilder builder = - NettyChannelBuilder.forTarget(target).negotiationType(NegotiationType.PLAINTEXT); - return 
builder.build(); - } - public static void main(String[] args) throws Exception { String host = args[0]; String instanceName = args[1]; diff --git a/src/main/java/build/buildfarm/tools/FindOperations.java b/src/main/java/build/buildfarm/tools/FindOperations.java index c858f121f6..f1d8494dff 100644 --- a/src/main/java/build/buildfarm/tools/FindOperations.java +++ b/src/main/java/build/buildfarm/tools/FindOperations.java @@ -14,14 +14,14 @@ package build.buildfarm.tools; +import static build.buildfarm.common.grpc.Channels.createChannel; + import build.buildfarm.common.DigestUtil; import build.buildfarm.instance.Instance; import build.buildfarm.instance.stub.StubInstance; import com.google.common.collect.ImmutableList; import com.google.longrunning.Operation; import io.grpc.ManagedChannel; -import io.grpc.netty.NegotiationType; -import io.grpc.netty.NettyChannelBuilder; // This tool can be used to find Operations based on their particular properties. // For example, it could find all of the operations executed by a particular user or particular @@ -29,12 +29,6 @@ // ./tool shard SHA256 // The operations that match the query will be printed. class FindOperations { - private static ManagedChannel createChannel(String target) { - NettyChannelBuilder builder = - NettyChannelBuilder.forTarget(target).negotiationType(NegotiationType.PLAINTEXT); - return builder.build(); - } - public static void main(String[] args) throws Exception { // get arguments for establishing an instance String host = args[0]; diff --git a/src/main/java/build/buildfarm/tools/GracefulShutdownTest.java b/src/main/java/build/buildfarm/tools/GracefulShutdown.java similarity index 85% rename from src/main/java/build/buildfarm/tools/GracefulShutdownTest.java rename to src/main/java/build/buildfarm/tools/GracefulShutdown.java index f98cddbde4..337b68be02 100644 --- a/src/main/java/build/buildfarm/tools/GracefulShutdownTest.java +++ b/src/main/java/build/buildfarm/tools/GracefulShutdown.java @@ -14,24 +14,18 @@ package build.buildfarm.tools; +import static build.buildfarm.common.grpc.Channels.createChannel; + import build.buildfarm.v1test.AdminGrpc; import build.buildfarm.v1test.DisableScaleInProtectionRequest; import build.buildfarm.v1test.PrepareWorkerForGracefulShutDownRequest; import build.buildfarm.v1test.ShutDownWorkerGracefullyRequest; import build.buildfarm.v1test.ShutDownWorkerGrpc; import io.grpc.ManagedChannel; -import io.grpc.netty.NegotiationType; -import io.grpc.netty.NettyChannelBuilder; - -class GracefulShutdownTest { - private static ManagedChannel createChannel(String target) { - NettyChannelBuilder builder = - NettyChannelBuilder.forTarget(target).negotiationType(NegotiationType.PLAINTEXT); - return builder.build(); - } +class GracefulShutdown { /** - * Example command: GracefulShutdownTest ShutDown workerIp buildfarm-endpoint + * Example command: GracefulShutdown ShutDown workerIp buildfarm-endpoint * * @param args */ @@ -54,7 +48,7 @@ private static void shutDownGracefully(String[] args) { } /** - * Example command: GracefulShutdownTest PrepareWorker WorkerIp:port + * Example command: GracefulShutdown PrepareWorker WorkerIp:port * * @param args */ @@ -71,7 +65,7 @@ private static void prepareWorkerForShutDown(String[] args) { } /** - * Example command: GracefulShutdownTest DisableProtection WorkerIp buildfarm_endpoint + * Example command: GracefulShutdown DisableProtection WorkerIp buildfarm_endpoint * * @param args */ @@ -100,7 +94,8 @@ public static void main(String[] args) { break; default: System.out.println( 
- "The action your choose is wrong. Please choose one from ShutDown, PrepareWorker, and DisableProtection"); + "The action your choose is wrong. Please choose one from ShutDown, PrepareWorker, and" + + " DisableProtection"); break; } } diff --git a/src/main/java/build/buildfarm/tools/Hist.java b/src/main/java/build/buildfarm/tools/Hist.java index c8ec6c2bfa..2abdf55f7d 100644 --- a/src/main/java/build/buildfarm/tools/Hist.java +++ b/src/main/java/build/buildfarm/tools/Hist.java @@ -14,6 +14,8 @@ package build.buildfarm.tools; +import static build.buildfarm.common.grpc.Channels.createChannel; + import build.bazel.remote.execution.v2.ExecuteOperationMetadata; import build.bazel.remote.execution.v2.ExecutionStage; import build.buildfarm.common.DigestUtil; @@ -23,16 +25,8 @@ import com.google.longrunning.Operation; import com.google.protobuf.InvalidProtocolBufferException; import io.grpc.ManagedChannel; -import io.grpc.netty.NegotiationType; -import io.grpc.netty.NettyChannelBuilder; class Hist { - private static ManagedChannel createChannel(String target) { - NettyChannelBuilder builder = - NettyChannelBuilder.forTarget(target).negotiationType(NegotiationType.PLAINTEXT); - return builder.build(); - } - @SuppressWarnings("ConstantConditions") private static void printHistogramValue(int executing) { StringBuilder s = new StringBuilder(); diff --git a/src/main/java/build/buildfarm/tools/IndexWorker.java b/src/main/java/build/buildfarm/tools/IndexWorker.java index a36e3f9217..317a5ff637 100644 --- a/src/main/java/build/buildfarm/tools/IndexWorker.java +++ b/src/main/java/build/buildfarm/tools/IndexWorker.java @@ -14,25 +14,19 @@ package build.buildfarm.tools; +import static build.buildfarm.common.grpc.Channels.createChannel; + import build.buildfarm.common.CasIndexResults; import build.buildfarm.common.DigestUtil; import build.buildfarm.instance.Instance; import build.buildfarm.instance.stub.StubInstance; import io.grpc.ManagedChannel; -import io.grpc.netty.NegotiationType; -import io.grpc.netty.NettyChannelBuilder; // This tool can be used to remove worker entries from the CAS. // This is usually done via the admin service when a worker is departing from the cluster. // ./tool shard SHA256 // The results of the removal are printed after the CAS entries have been removed. 
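A note on the repeated deletion above and below: each tool previously carried its own private createChannel, and this patch points them all at the shared build.buildfarm.common.grpc.Channels.createChannel instead. A minimal sketch of that helper, inferred from the deleted duplicates (the real class may offer more than shown here, for example TLS negotiation):

package build.buildfarm.common.grpc;

import io.grpc.ManagedChannel;
import io.grpc.netty.NegotiationType;
import io.grpc.netty.NettyChannelBuilder;

public final class Channels {
  private Channels() {}

  // Mirrors the removed per-tool copies: a plaintext (non-TLS) Netty channel.
  public static ManagedChannel createChannel(String target) {
    return NettyChannelBuilder.forTarget(target)
        .negotiationType(NegotiationType.PLAINTEXT)
        .build();
  }
}

The IndexWorker class that follows is one such call site.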
class IndexWorker { - private static ManagedChannel createChannel(String target) { - NettyChannelBuilder builder = - NettyChannelBuilder.forTarget(target).negotiationType(NegotiationType.PLAINTEXT); - return builder.build(); - } - public static void main(String[] args) throws Exception { String host = args[0]; String instanceName = args[1]; diff --git a/src/main/java/build/buildfarm/tools/Mount.java b/src/main/java/build/buildfarm/tools/Mount.java index 43061d12bc..acd61f1f11 100644 --- a/src/main/java/build/buildfarm/tools/Mount.java +++ b/src/main/java/build/buildfarm/tools/Mount.java @@ -14,6 +14,7 @@ package build.buildfarm.tools; +import static build.buildfarm.common.grpc.Channels.createChannel; import static build.buildfarm.instance.Utils.getBlob; import static com.google.common.base.Preconditions.checkArgument; @@ -27,8 +28,6 @@ import build.buildfarm.worker.FuseCAS; import com.google.protobuf.ByteString; import io.grpc.ManagedChannel; -import io.grpc.netty.NegotiationType; -import io.grpc.netty.NettyChannelBuilder; import java.io.IOException; import java.io.InputStream; import java.nio.file.Path; @@ -37,17 +36,15 @@ import java.util.Map; class Mount { - private static ManagedChannel createChannel(String target) { - NettyChannelBuilder builder = - NettyChannelBuilder.forTarget(target).negotiationType(NegotiationType.PLAINTEXT); - return builder.build(); - } - @SuppressWarnings("BusyWait") - public static void main(String[] args) throws Exception { - String host = args[0]; - String instanceName = args[1]; - DigestUtil digestUtil = DigestUtil.forHash(args[2]); + public static void mount( + String host, + String instanceName, + DigestUtil digestUtil, + String root, + Digest inputRoot, + String name) + throws IOException, InterruptedException { ManagedChannel channel = createChannel(host); Instance instance = new StubInstance(instanceName, digestUtil, channel); @@ -55,7 +52,7 @@ public static void main(String[] args) throws Exception { FuseCAS fuse = new FuseCAS( - cwd.resolve(args[3]), + cwd.resolve(root), new InputStreamFactory() { final Map<Digest, ByteString> cache = new HashMap<>(); @@ -82,8 +79,7 @@ public synchronized InputStream newInput( } }); - // FIXME make bettar - fuse.createInputRoot(args[5], DigestUtil.parseDigest(args[4])); + fuse.createInputRoot(name, inputRoot); try { //noinspection InfiniteLoopStatement @@ -96,4 +92,21 @@ public synchronized InputStream newInput( fuse.stop(); } } + + public static void main(String[] args) throws Exception { + if (args.length != 6) { + System.err.println( + "Usage: bf-mount <host> <instance-name> <hash-type> <root> <input-root-digest> <name>"); + System.err.println("\nMount an REAPI directory specified by 'digest' at 'name' under 'root'"); + System.exit(1); + } + + String host = args[0]; + String instanceName = args[1]; + DigestUtil digestUtil = DigestUtil.forHash(args[2]); + String root = args[3]; + Digest inputRoot = DigestUtil.parseDigest(args[4]); + String name = args[5]; + mount(host, instanceName, digestUtil, root, inputRoot, name); + } } diff --git a/src/main/java/build/buildfarm/tools/WorkerProfile.java b/src/main/java/build/buildfarm/tools/WorkerProfile.java index d820446a53..4a710cde32 100644 --- a/src/main/java/build/buildfarm/tools/WorkerProfile.java +++ b/src/main/java/build/buildfarm/tools/WorkerProfile.java @@ -14,6 +14,8 @@ package build.buildfarm.tools; +import static build.buildfarm.common.grpc.Channels.createChannel; + import build.buildfarm.common.DigestUtil; import build.buildfarm.common.config.BuildfarmConfigs; import build.buildfarm.common.config.ShardWorkerOptions; @@ -30,10 +32,7 @@ import
com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.util.Durations; import com.google.protobuf.util.JsonFormat; -import io.grpc.ManagedChannel; import io.grpc.StatusRuntimeException; -import io.grpc.netty.NegotiationType; -import io.grpc.netty.NettyChannelBuilder; import java.io.IOException; import java.nio.file.Paths; import java.util.HashMap; @@ -41,17 +40,11 @@ import java.util.Map; import java.util.Set; import javax.naming.ConfigurationException; -import redis.clients.jedis.JedisCluster; +import redis.clients.jedis.UnifiedJedis; class WorkerProfile { private static BuildfarmConfigs configs = BuildfarmConfigs.getInstance(); - private static ManagedChannel createChannel(String target) { - NettyChannelBuilder builder = - NettyChannelBuilder.forTarget(target).negotiationType(NegotiationType.PLAINTEXT); - return builder.build(); - } - /** * Transform worker string from "ip-10-135-31-210.ec2:8981" to "10.135.31.210". * @@ -116,11 +109,11 @@ private static Set getWorkers(String[] args) throws ConfigurationExcepti } catch (IOException e) { System.out.println("Could not parse yml configuration file." + e); } - RedisClient client = new RedisClient(JedisClusterFactory.create().get()); + RedisClient client = new RedisClient(JedisClusterFactory.create("worker-profile").get()); return client.call(jedis -> fetchWorkers(jedis, System.currentTimeMillis())); } - private static Set fetchWorkers(JedisCluster jedis, long now) { + private static Set fetchWorkers(UnifiedJedis jedis, long now) { Set workers = Sets.newConcurrentHashSet(); for (Map.Entry entry : jedis.hgetAll(configs.getBackplane().getWorkersHashName() + "_storage").entrySet()) { @@ -161,7 +154,8 @@ private static void workerProfile(String[] args) throws IOException { } if (workers == null || workers.isEmpty()) { System.out.println( - "cannot find any workers, check the redis url and make sure there are workers in the cluster"); + "cannot find any workers, check the redis url and make sure there are workers in the" + + " cluster"); } else { // remove the unregistered workers for (String existingWorker : workersToChannels.keySet()) { diff --git a/src/main/java/build/buildfarm/worker/BUILD b/src/main/java/build/buildfarm/worker/BUILD index 3ab09723b7..016eb89092 100644 --- a/src/main/java/build/buildfarm/worker/BUILD +++ b/src/main/java/build/buildfarm/worker/BUILD @@ -4,15 +4,19 @@ java_library( plugins = ["//src/main/java/build/buildfarm/common:lombok"], visibility = ["//visibility:public"], deps = [ + "//persistentworkers/src/main/java/persistent/bazel:bazel-persistent-workers", + "//persistentworkers/src/main/java/persistent/common:persistent-common", + "//src/main/java/build/buildfarm/cas", "//src/main/java/build/buildfarm/common", "//src/main/java/build/buildfarm/common/config", "//src/main/java/build/buildfarm/instance", "//src/main/java/build/buildfarm/instance/stub", + "//src/main/java/build/buildfarm/worker/persistent", "//src/main/java/build/buildfarm/worker/resources", + "//src/main/java/build/buildfarm/worker/util", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", "@bazel//src/main/protobuf:execution_statistics_java_proto", - "@googleapis//:google_rpc_code_java_proto", - "@googleapis//:google_rpc_error_details_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_github_docker_java_docker_java", "@maven//:com_github_docker_java_docker_java_api", "@maven//:com_github_docker_java_docker_java_core", @@ -31,11 +35,11 @@ java_library( "@maven//:io_grpc_grpc_api", 
"@maven//:io_grpc_grpc_context", "@maven//:io_grpc_grpc_core", - "@maven//:io_grpc_grpc_netty", "@maven//:io_grpc_grpc_protobuf", "@maven//:io_grpc_grpc_stub", "@maven//:io_prometheus_simpleclient", "@maven//:org_apache_commons_commons_compress", + "@maven//:org_apache_commons_commons_lang3", "@maven//:org_jetbrains_annotations", "@maven//:org_projectlombok_lombok", ], diff --git a/src/main/java/build/buildfarm/worker/DequeueMatchEvaluator.java b/src/main/java/build/buildfarm/worker/DequeueMatchEvaluator.java index c3c4c1dfc9..b3443fcabb 100644 --- a/src/main/java/build/buildfarm/worker/DequeueMatchEvaluator.java +++ b/src/main/java/build/buildfarm/worker/DequeueMatchEvaluator.java @@ -14,11 +14,12 @@ package build.buildfarm.worker; -import build.bazel.remote.execution.v2.Command; import build.bazel.remote.execution.v2.Platform; import build.buildfarm.common.ExecutionProperties; import build.buildfarm.common.config.BuildfarmConfigs; import build.buildfarm.v1test.QueueEntry; +import build.buildfarm.worker.resources.LocalResourceSet; +import build.buildfarm.worker.resources.LocalResourceSetUtils; import com.google.common.collect.Iterables; import com.google.common.collect.SetMultimap; import org.jetbrains.annotations.NotNull; @@ -41,27 +42,14 @@ */ public class DequeueMatchEvaluator { private static BuildfarmConfigs configs = BuildfarmConfigs.getInstance(); - /** - * @brief Decide whether the worker should keep the operation or put it back on the queue. - * @details Compares the platform properties of the worker to the operation's platform properties. - * @param workerProvisions The provisions of the worker. - * @param queueEntry An entry recently removed from the queue. - * @return Whether or not the worker should accept or reject the queue entry. - * @note Overloaded. - * @note Suggested return identifier: shouldKeepOperation. - */ - @SuppressWarnings("NullableProblems") - @NotNull - public static boolean shouldKeepOperation( - SetMultimap workerProvisions, QueueEntry queueEntry) { - return shouldKeepViaPlatform(workerProvisions, queueEntry.getPlatform()); - } /** * @brief Decide whether the worker should keep the operation or put it back on the queue. * @details Compares the platform properties of the worker to the operation's platform properties. * @param workerProvisions The provisions of the worker. - * @param command A command to evaluate. + * @param name Worker name. + * @param resourceSet The limited resources that the worker has available. + * @param queueEntry An entry recently removed from the queue. * @return Whether or not the worker should accept or reject the queue entry. * @note Overloaded. * @note Suggested return identifier: shouldKeepOperation. @@ -69,8 +57,10 @@ public static boolean shouldKeepOperation( @SuppressWarnings("NullableProblems") @NotNull public static boolean shouldKeepOperation( - SetMultimap workerProvisions, Command command) { - return shouldKeepViaPlatform(workerProvisions, command.getPlatform()); + SetMultimap workerProvisions, + LocalResourceSet resourceSet, + QueueEntry queueEntry) { + return shouldKeepViaPlatform(workerProvisions, resourceSet, queueEntry.getPlatform()); } /** @@ -79,6 +69,8 @@ public static boolean shouldKeepOperation( * @details Compares the platform properties of the worker to the platform properties of the * operation. * @param workerProvisions The provisions of the worker. + * @param name Worker name. + * @param resourceSet The limited resources that the worker has available. * @param platform The platforms of operation. 
* @return Whether or not the worker should accept or reject the operation. * @note Suggested return identifier: shouldKeepOperation. @@ -86,14 +78,11 @@ public static boolean shouldKeepOperation( @SuppressWarnings("NullableProblems") @NotNull private static boolean shouldKeepViaPlatform( - SetMultimap workerProvisions, Platform platform) { - // attempt to execute everything the worker gets off the queue. - // this is a recommended configuration. - if (configs.getWorker().getDequeueMatchSettings().isAcceptEverything()) { - return true; - } - - return satisfiesProperties(workerProvisions, platform); + SetMultimap workerProvisions, + LocalResourceSet resourceSet, + Platform platform) { + return satisfiesProperties(workerProvisions, platform) + && LocalResourceSetUtils.claimResources(platform, resourceSet); } /** @@ -131,7 +120,8 @@ private static boolean satisfiesProperties( private static boolean satisfiesProperty( SetMultimap workerProvisions, Platform.Property property) { // validate min cores - if (property.getName().equals(ExecutionProperties.MIN_CORES)) { + if (property.getName().equals(ExecutionProperties.CORES) + || property.getName().equals(ExecutionProperties.MIN_CORES)) { if (!workerProvisions.containsKey(ExecutionProperties.CORES)) { return false; } @@ -163,13 +153,13 @@ private static boolean satisfiesProperty( return possibleMemories >= memBytesRequested; } - // accept other properties not specified on the worker - if (configs.getWorker().getDequeueMatchSettings().isAllowUnmatched()) { - return true; + // ensure exact matches + if (workerProvisions.containsKey(property.getName())) { + return workerProvisions.containsEntry(property.getName(), property.getValue()) + || workerProvisions.containsEntry(property.getName(), "*"); } - // ensure exact matches - return workerProvisions.containsEntry(property.getName(), property.getValue()) - || workerProvisions.containsEntry(property.getName(), "*"); + // accept other properties not specified on the worker + return configs.getWorker().getDequeueMatchSettings().isAllowUnmatched(); } } diff --git a/src/main/java/build/buildfarm/worker/DockerExecutor.java b/src/main/java/build/buildfarm/worker/DockerExecutor.java index 924821f5c4..22ce3cb0fb 100644 --- a/src/main/java/build/buildfarm/worker/DockerExecutor.java +++ b/src/main/java/build/buildfarm/worker/DockerExecutor.java @@ -85,6 +85,7 @@ public static Code runActionWithDocker( cleanUpContainer(dockerClient, containerId); return Code.OK; } + /** * @brief Setup the container for the action. * @details This ensures the image is fetched, the container is started, and that the container @@ -113,6 +114,7 @@ private static String prepareRequestedContainer( // container is ready for running actions return containerId; } + /** * @brief Fetch the user requested image for running the action. * @details The image will not be fetched if it already exists. @@ -130,6 +132,7 @@ private static void fetchImageIfMissing( .awaitCompletion(fetchTimeout.getSeconds(), TimeUnit.SECONDS); } } + /** * @brief Check to see if the image was already available. * @details Checking to see if the image is already available can avoid having to re-fetch it. @@ -148,6 +151,7 @@ private static boolean isLocalImagePresent(DockerClient dockerClient, String ima } return true; } + /** * @brief Get all the host paths that should be populated into the container. * @details Paths with use docker's copy archive API. 
@@ -161,6 +165,7 @@ private static List getPopulatePaths(Path execDir) { paths.addAll(Utils.getSymbolicLinkReferences(execDir)); return paths; } + /** * @brief Populate the container as needed by copying files into it. * @details This may or may not be necessary depending on mounts / volumes. @@ -174,6 +179,7 @@ private static void populateContainer( copyPathIntoContainer(dockerClient, containerId, path); } } + /** * @brief Copies the file or directory into the container. * @details Copies all folder descendants. @@ -190,6 +196,7 @@ private static void copyPathIntoContainer( cmd.withRemotePath(path.toAbsolutePath().toString()); cmd.exec(); } + /** * @brief Get the exit code of the action that was executed inside the container. * @details Docker stores the exit code after the execution and it can be queried with an execId. @@ -203,6 +210,7 @@ private static void extractExitCode( InspectExecResponse response = inspectExecCmd.exec(); resultBuilder.setExitCode(response.getExitCodeLong().intValue()); } + /** * @brief Extract information from the container after the action ran. * @details This can include exit code, output artifacts, and various docker information. @@ -222,6 +230,7 @@ private static void extractInformationFromContainer( extractExitCode(dockerClient, execId, resultBuilder); copyOutputsOutOfContainer(dockerClient, settings, containerId); } + /** * @brief Copies action outputs out of the container. * @details The outputs are known by the operation context. @@ -241,6 +250,7 @@ private static void copyOutputsOutOfContainer( outputDirPath.toFile().mkdirs(); } } + /** * @brief Delete the container. * @details Forces container deletion. @@ -254,6 +264,7 @@ private static void cleanUpContainer(DockerClient dockerClient, String container log.log(Level.SEVERE, "couldn't shutdown container: ", e); } } + /** * @brief Assuming the container is already created and properly populated/mounted with data, this * can be used to spawn an action inside of it. @@ -289,6 +300,7 @@ private static String runActionInsideContainer( return execId; } + /** * @brief Create a docker container for the action to run in. * @details We can use a separate container per action or keep containers alive and re-use them. @@ -317,6 +329,7 @@ private static String createContainer( // container is ready and started return response.getId(); } + /** * @brief Create a host config used for container creation. * @details This can determine container mounts and volumes. @@ -329,6 +342,7 @@ private static HostConfig getHostConfig(Path execDir) { mountExecRoot(config, execDir); return config; } + /** * @brief Add paths needed to mount the exec root. * @details These are added to the host config. @@ -350,6 +364,7 @@ private static void mountExecRoot(HostConfig config, Path execDir) { config.withBinds(binds); } + /** * @brief Copy the given file out of the container to the same host path. * @details The file is extracted as a tar and deserialized. diff --git a/src/main/java/build/buildfarm/worker/ExecDirException.java b/src/main/java/build/buildfarm/worker/ExecDirException.java new file mode 100644 index 0000000000..c7c909597f --- /dev/null +++ b/src/main/java/build/buildfarm/worker/ExecDirException.java @@ -0,0 +1,139 @@ +// Copyright 2023 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package build.buildfarm.worker; + +import static build.buildfarm.common.Errors.MISSING_INPUT; +import static build.buildfarm.common.Errors.VIOLATION_TYPE_INVALID; +import static build.buildfarm.common.Errors.VIOLATION_TYPE_MISSING; +import static java.util.logging.Level.SEVERE; + +import build.bazel.remote.execution.v2.Digest; +import build.buildfarm.cas.cfc.PutDirectoryException; +import build.buildfarm.common.DigestUtil; +import com.google.protobuf.Any; +import com.google.rpc.Code; +import com.google.rpc.PreconditionFailure; +import com.google.rpc.PreconditionFailure.Violation; +import com.google.rpc.Status; +import java.io.IOException; +import java.nio.file.NoSuchFileException; +import java.nio.file.Path; +import java.util.List; +import lombok.extern.java.Log; + +@Log +public class ExecDirException extends IOException { + private final Path path; + private final List<Throwable> exceptions; + + public static class ViolationException extends Exception { + private final Digest digest; + private final Path path; + private final boolean isExecutable; + + public ViolationException(Digest digest, Path path, boolean isExecutable, Throwable cause) { + super(cause); + this.digest = digest; + this.path = path; + this.isExecutable = isExecutable; + } + + private static String getDescription(Path path, boolean isExecutable) { + if (path != null) { + return "The file `/" + path + (isExecutable ? "*" : "") + "` was not found in the CAS."; + } + return MISSING_INPUT; + } + + static void toViolation( + Violation.Builder violation, Throwable cause, Path path, boolean isExecutable) { + if (cause instanceof NoSuchFileException) { + violation + .setType(VIOLATION_TYPE_MISSING) + .setDescription(getDescription(path, isExecutable)); + } else { + violation.setType(VIOLATION_TYPE_INVALID).setDescription(cause.getMessage()); + } + } + + public Violation getViolation() { + Violation.Builder violation = Violation.newBuilder(); + toViolation(violation, getCause(), path, isExecutable); + violation.setSubject("blobs/" + DigestUtil.toString(digest)); + return violation.build(); + } + } + + private static String getErrorMessage(Path path, List<Throwable> exceptions) { + return String.format("%s: %d %s: %s", path, exceptions.size(), "exceptions", exceptions); + } + + public ExecDirException(Path path, List<Throwable> exceptions) { + // When printing the exception, show the captured sub-exceptions.
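For orientation before the constructor body continues: each sub-exception is recorded via addSuppressed, so one SEVERE log line surfaces every failed input, and toStatus (further down in this file) folds them into a FAILED_PRECONDITION status carrying a PreconditionFailure. A hedged sketch of how same-package worker code might use it; the exec dir, digest, and file names here are hypothetical:

// Hypothetical values for illustration only.
Path execDir = Paths.get("/tmp/exec/operation-1234");
List<Throwable> causes =
    List.of(
        new ExecDirException.ViolationException(
            inputDigest, // a Digest for the missing blob, hypothetical
            Paths.get("bin/tool"),
            /* isExecutable= */ true,
            new NoSuchFileException("bin/tool")));
ExecDirException e = new ExecDirException(execDir, causes);
// e.getMessage() -> "/tmp/exec/operation-1234: 1 exceptions: [...]"
// e.getSuppressed() carries each cause.
Status status = e.toStatus(Status.newBuilder()).build(); // toStatus is package-private
// status.getCode() == Code.FAILED_PRECONDITION.getNumber(); its details pack a
// PreconditionFailure whose violations name the missing blobs.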
+ super(getErrorMessage(path, exceptions)); + this.path = path; + this.exceptions = exceptions; + for (Throwable exception : exceptions) { + addSuppressed(exception); + } + } + + Path getPath() { + return path; + } + + List getExceptions() { + return exceptions; + } + + Status.Builder toStatus(Status.Builder status) { + status.setCode(Code.FAILED_PRECONDITION.getNumber()); + + // aggregate into a single preconditionFailure + PreconditionFailure.Builder preconditionFailure = PreconditionFailure.newBuilder(); + for (Throwable exception : exceptions) { + if (exception instanceof ViolationException) { + ViolationException violationException = (ViolationException) exception; + preconditionFailure.addViolations(violationException.getViolation()); + } else if (exception instanceof PutDirectoryException) { + PutDirectoryException putDirException = (PutDirectoryException) exception; + for (Throwable putDirCause : putDirException.getExceptions()) { + if (putDirCause instanceof IOException) { + Violation.Builder violation = preconditionFailure.addViolationsBuilder(); + ViolationException.toViolation( + violation, putDirCause, /* path= */ null, /* isExecutable= */ false); + if (putDirCause instanceof NoSuchFileException) { + violation.setSubject("blobs/" + putDirCause.getMessage()); + } else { + log.log(SEVERE, "unrecognized put dir cause exception", putDirCause); + violation.setSubject("blobs/" + DigestUtil.toString(putDirException.getDigest())); + } + } else { + log.log(SEVERE, "unrecognized put dir exception", putDirCause); + status.setCode(Code.INTERNAL.getNumber()); + } + } + } else { + log.log(SEVERE, "unrecognized exec dir exception", exception); + status.setCode(Code.INTERNAL.getNumber()); + } + } + if (preconditionFailure.getViolationsCount() > 0) { + status.addDetails(Any.pack(preconditionFailure.build())); + } + + return status; + } +} diff --git a/src/main/java/build/buildfarm/worker/ExecuteActionStage.java b/src/main/java/build/buildfarm/worker/ExecuteActionStage.java index 6c5e247a8f..f1ad546676 100644 --- a/src/main/java/build/buildfarm/worker/ExecuteActionStage.java +++ b/src/main/java/build/buildfarm/worker/ExecuteActionStage.java @@ -107,7 +107,7 @@ public void releaseExecutor( int slotUsage = removeAndRelease(operationName, claims); executionTime.observe(usecs / 1000.0); executionStallTime.observe(stallUSecs / 1000.0); - logComplete( + complete( operationName, usecs, stallUSecs, @@ -141,7 +141,7 @@ protected void iterate() throws InterruptedException { executors.add(executorThread); int slotUsage = executorClaims.addAndGet(limits.cpu.claimed); executionSlotUsage.set(slotUsage); - logStart(operationContext.operation.getName(), getUsage(slotUsage)); + start(operationContext.operation.getName(), getUsage(slotUsage)); executorThread.start(); } } diff --git a/src/main/java/build/buildfarm/worker/Executor.java b/src/main/java/build/buildfarm/worker/Executor.java index 588d38c208..de8a7d0fcf 100644 --- a/src/main/java/build/buildfarm/worker/Executor.java +++ b/src/main/java/build/buildfarm/worker/Executor.java @@ -36,12 +36,16 @@ import build.buildfarm.common.config.ExecutionPolicy; import build.buildfarm.common.config.ExecutionWrapper; import build.buildfarm.v1test.ExecutingOperationMetadata; +import build.buildfarm.v1test.Tree; import build.buildfarm.worker.WorkerContext.IOResource; +import build.buildfarm.worker.persistent.PersistentExecutor; +import build.buildfarm.worker.persistent.WorkFilesContext; import build.buildfarm.worker.resources.ResourceLimits; import 
com.github.dockerjava.api.DockerClient; import com.github.dockerjava.core.DockerClientBuilder; import com.google.common.base.Stopwatch; import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import com.google.devtools.build.lib.shell.Protos.ExecutionStatistics; import com.google.longrunning.Operation; import com.google.protobuf.Any; @@ -110,9 +114,7 @@ private long runInterruptible(Stopwatch stopwatch, ResourceLimits limits) } Operation operation = - operationContext - .operation - .toBuilder() + operationContext.operation.toBuilder() .setMetadata( Any.pack( ExecutingOperationMetadata.newBuilder() @@ -199,7 +201,7 @@ private long executePolled( Stopwatch stopwatch) throws InterruptedException { /* execute command */ - log.log(Level.FINE, "Executor: Operation " + operation.getName() + " Executing command"); + log.log(Level.FINER, "Executor: Operation " + operation.getName() + " Executing command"); ActionResult.Builder resultBuilder = operationContext.executeResponse.getResultBuilder(); resultBuilder @@ -291,7 +293,7 @@ private long executePolled( long executeUSecs = stopwatch.elapsed(MICROSECONDS); log.log( - Level.FINE, + Level.FINER, String.format( "Executor::executeCommand(%s): Completed command: exit code %d", operationName, resultBuilder.getExitCode())); @@ -309,7 +311,7 @@ private long executePolled( throw e; } } else { - log.log(Level.FINE, "Executor: Operation " + operationName + " Failed to claim output"); + log.log(Level.FINER, "Executor: Operation " + operationName + " Failed to claim output"); boolean wasInterrupted = Thread.interrupted(); try { putError(); @@ -359,6 +361,9 @@ public void run(ResourceLimits limits) { } finally { boolean wasInterrupted = Thread.interrupted(); try { + // Now that the execution has finished we can return any of the claims against local + // resources. 
+ workerContext.returnLocalResources(operationContext.queueEntry); owner.releaseExecutor( operationName, limits.cpu.claimed, @@ -424,16 +429,38 @@ private Code executeCommand( for (EnvironmentVariable environmentVariable : environmentVariables) { environment.put(environmentVariable.getName(), environmentVariable.getValue()); } - for (Map.Entry environmentVariable : - limits.extraEnvironmentVariables.entrySet()) { - environment.put(environmentVariable.getKey(), environmentVariable.getValue()); - } + environment.putAll(limits.extraEnvironmentVariables); // allow debugging before an execution if (limits.debugBeforeExecution) { return ExecutionDebugger.performBeforeExecutionDebug(processBuilder, limits, resultBuilder); } + boolean usePersistentWorker = + !limits.persistentWorkerKey.isEmpty() && !limits.persistentWorkerCommand.isEmpty(); + + if (usePersistentWorker) { + log.fine( + "usePersistentWorker; got persistentWorkerCommand of : " + + limits.persistentWorkerCommand); + + Tree execTree = operationContext.tree; + + WorkFilesContext filesContext = + WorkFilesContext.fromContext(execDir, execTree, operationContext.command); + + return PersistentExecutor.runOnPersistentWorker( + limits.persistentWorkerCommand, + filesContext, + operationName, + ImmutableList.copyOf(arguments), + ImmutableMap.copyOf(environment), + limits, + timeout, + PersistentExecutor.defaultWorkRootsDir, + resultBuilder); + } + // run the action under docker if (limits.containerSettings.enabled) { DockerClient dockerClient = DockerClientBuilder.getInstance().build(); diff --git a/src/main/java/build/buildfarm/worker/FuseCAS.java b/src/main/java/build/buildfarm/worker/FuseCAS.java index 68489cbe2e..391b3ce3b7 100644 --- a/src/main/java/build/buildfarm/worker/FuseCAS.java +++ b/src/main/java/build/buildfarm/worker/FuseCAS.java @@ -399,7 +399,7 @@ private synchronized void incMounts() throws IOException { log.log(Level.INFO, "Mounting FuseCAS"); String[] fuseOpts = {"-o", "max_write=131072", "-o", "big_writes"}; try { - mount(mountPath, /* blocking=*/ false, /* debug=*/ false, /* fuseOpts=*/ fuseOpts); + mount(mountPath, /* blocking= */ false, /* debug= */ false, /* fuseOpts= */ fuseOpts); } catch (FuseException e) { throw new IOException(e); } @@ -547,6 +547,9 @@ public int getattr(String path, FileStat stat) { return -ErrorCodes.ENOENT(); } + // stock block size + stat.st_blksize.set(4096); + if (entry.isSymlink()) { stat.st_mode.set(FileStat.S_IFLNK | 0777); } else if (entry.isDirectory()) { @@ -554,9 +557,13 @@ public int getattr(String path, FileStat stat) { } else { int mode = entry.isExecutable() ? 
0555 : 0444; stat.st_mode.set(FileStat.S_IFREG | mode); - stat.st_nlink.set(1); - stat.st_size.set(entry.size()); + stat.st_nlink.set(1); // should fix this for number of digests pointing to it } + long size = entry.size(); + long blksize = stat.st_blksize.get(); + long blocks = (size + blksize - 1) / blksize; + stat.st_size.set(size); + stat.st_blocks.set(blocks); return 0; } diff --git a/src/main/java/build/buildfarm/worker/InputFetchStage.java b/src/main/java/build/buildfarm/worker/InputFetchStage.java index 6b0f741fd8..3953ef9595 100644 --- a/src/main/java/build/buildfarm/worker/InputFetchStage.java +++ b/src/main/java/build/buildfarm/worker/InputFetchStage.java @@ -72,13 +72,14 @@ public void releaseInputFetcher( int size = removeAndRelease(operationName); inputFetchTime.observe(usecs / 1000.0); inputFetchStallTime.observe(stallUSecs / 1000.0); - logComplete( + complete( operationName, usecs, stallUSecs, String.format("%s, %s", success ? "Success" : "Failure", getUsage(size))); } + @Override public int getSlotUsage() { return fetchers.size(); } @@ -106,8 +107,7 @@ protected void iterate() throws InterruptedException { fetchers.add(fetcher); int slotUsage = fetchers.size(); inputFetchSlotUsage.set(slotUsage); - logStart( - operationContext.queueEntry.getExecuteEntry().getOperationName(), getUsage(slotUsage)); + start(operationContext.queueEntry.getExecuteEntry().getOperationName(), getUsage(slotUsage)); fetcher.start(); } } diff --git a/src/main/java/build/buildfarm/worker/InputFetcher.java b/src/main/java/build/buildfarm/worker/InputFetcher.java index 667ccfa0e5..df841cbc4c 100644 --- a/src/main/java/build/buildfarm/worker/InputFetcher.java +++ b/src/main/java/build/buildfarm/worker/InputFetcher.java @@ -15,7 +15,6 @@ package build.buildfarm.worker; import static build.bazel.remote.execution.v2.ExecutionStage.Value.QUEUED; -import static build.buildfarm.common.Errors.VIOLATION_TYPE_INVALID; import static java.lang.String.format; import static java.util.concurrent.TimeUnit.DAYS; import static java.util.concurrent.TimeUnit.MICROSECONDS; @@ -30,13 +29,18 @@ import build.bazel.remote.execution.v2.FileNode; import build.buildfarm.common.OperationFailer; import build.buildfarm.common.ProxyDirectoriesIndex; +import build.buildfarm.v1test.ExecuteEntry; import build.buildfarm.v1test.QueuedOperation; +import build.buildfarm.v1test.Tree; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Stopwatch; import com.google.common.collect.Iterables; import com.google.longrunning.Operation; import com.google.protobuf.Duration; import com.google.protobuf.util.Durations; import com.google.protobuf.util.Timestamps; +import com.google.rpc.Code; +import com.google.rpc.Status; import io.grpc.Deadline; import java.io.IOException; import java.nio.file.Path; @@ -162,9 +166,10 @@ static String getExecutablePath( return null; } - private long fetchPolled(Stopwatch stopwatch) throws InterruptedException { + @VisibleForTesting + long fetchPolled(Stopwatch stopwatch) throws InterruptedException { String operationName = operationContext.queueEntry.getExecuteEntry().getOperationName(); - log.log(Level.FINE, format("fetching inputs: %s", operationName)); + log.log(Level.FINER, format("fetching inputs: %s", operationName)); ExecutedActionMetadata.Builder executedAction = operationContext @@ -196,8 +201,15 @@ private long fetchPolled(Stopwatch stopwatch) throws InterruptedException { queuedOperation.getAction(), queuedOperation.getCommand()); } catch (IOException e) { - 
log.log(Level.SEVERE, format("error creating exec dir for %s", operationName), e); - failOperation("Error creating exec dir", e.toString()); + Status.Builder status = Status.newBuilder().setMessage("Error creating exec dir"); + if (e instanceof ExecDirException) { + ExecDirException execDirEx = (ExecDirException) e; + execDirEx.toStatus(status); + } else { + status.setCode(Code.INTERNAL.getNumber()); + log.log(Level.SEVERE, format("error creating exec dir for %s", operationName), e); + } + failOperation(status.build()); return 0; } success = true; @@ -206,9 +218,7 @@ private long fetchPolled(Stopwatch stopwatch) throws InterruptedException { String programName = queuedOperation.getCommand().getArguments(0); Directory root = directoriesIndex.get(queuedOperation.getTree().getRootDigest()); Command command = - queuedOperation - .getCommand() - .toBuilder() + queuedOperation.getCommand().toBuilder() .clearArguments() .addArguments(getExecutablePath(programName, root, directoriesIndex)) .addAllArguments(Iterables.skip(queuedOperation.getCommand().getArgumentsList(), 1)) @@ -221,7 +231,7 @@ private long fetchPolled(Stopwatch stopwatch) throws InterruptedException { boolean completed = false; try { long fetchUSecs = stopwatch.elapsed(MICROSECONDS); - proceedToOutput(queuedOperation.getAction(), command, execDir); + proceedToOutput(queuedOperation.getAction(), command, execDir, queuedOperation.getTree()); completed = true; return stopwatch.elapsed(MICROSECONDS) - fetchUSecs; } finally { @@ -237,7 +247,7 @@ private long fetchPolled(Stopwatch stopwatch) throws InterruptedException { } } - private void proceedToOutput(Action action, Command command, Path execDir) + private void proceedToOutput(Action action, Command command, Path execDir, Tree tree) throws InterruptedException { // switch poller to disable deadline operationContext.poller.pause(); @@ -250,11 +260,11 @@ private void proceedToOutput(Action action, Command command, Path execDir) Deadline.after(10, DAYS)); OperationContext fetchedOperationContext = - operationContext - .toBuilder() + operationContext.toBuilder() .setExecDir(execDir) .setAction(action) .setCommand(command) + .setTree(tree) .build(); boolean claimed = owner.output().claim(fetchedOperationContext); operationContext.poller.pause(); @@ -267,7 +277,7 @@ private void proceedToOutput(Action action, Command command, Path execDir) } } else { String operationName = operationContext.queueEntry.getExecuteEntry().getOperationName(); - log.log(Level.FINE, "InputFetcher: Operation " + operationName + " Failed to claim output"); + log.log(Level.FINER, "InputFetcher: Operation " + operationName + " Failed to claim output"); owner.error().put(operationContext); } @@ -311,15 +321,10 @@ public void run() { } } - private void failOperation(String failureMessage, String failureDetails) - throws InterruptedException { + private void failOperation(Status status) throws InterruptedException { + ExecuteEntry executeEntry = operationContext.queueEntry.getExecuteEntry(); Operation failedOperation = - OperationFailer.get( - operationContext.operation, - operationContext.queueEntry.getExecuteEntry(), - VIOLATION_TYPE_INVALID, - failureMessage, - failureDetails); + OperationFailer.get(operationContext.operation, executeEntry, status); try { workerContext.putOperation(failedOperation); @@ -327,7 +332,7 @@ private void failOperation(String failureMessage, String failureDetails) operationContext.toBuilder().setOperation(failedOperation).build(); owner.error().put(newOperationContext); } catch (Exception 
e) { - String operationName = operationContext.queueEntry.getExecuteEntry().getOperationName(); + String operationName = executeEntry.getOperationName(); log.log(Level.SEVERE, format("Cannot report failed operation %s", operationName), e); } } diff --git a/src/main/java/build/buildfarm/worker/MatchStage.java b/src/main/java/build/buildfarm/worker/MatchStage.java index e245ee68d6..48c60a91f1 100644 --- a/src/main/java/build/buildfarm/worker/MatchStage.java +++ b/src/main/java/build/buildfarm/worker/MatchStage.java @@ -81,8 +81,7 @@ public boolean onEntry(@Nullable QueueEntry queueEntry) throws InterruptedExcept Preconditions.checkState(poller == null); operationContext = - operationContext - .toBuilder() + operationContext.toBuilder() .setQueueEntry(queueEntry) .setPoller(workerContext.createPoller("MatchStage", queueEntry, QUEUED)) .build(); @@ -95,20 +94,15 @@ public void onError(Throwable t) { throw new RuntimeException(t); } - @Override - public void setOnCancelHandler(Runnable onCancelHandler) { - // never called, only blocking stub used - } - @SuppressWarnings("SameReturnValue") private boolean onOperationPolled() throws InterruptedException { String operationName = operationContext.queueEntry.getExecuteEntry().getOperationName(); - logStart(operationName); + start(operationName); long matchingAtUSecs = stopwatch.elapsed(MICROSECONDS); OperationContext matchedOperationContext = match(operationContext); long matchedInUSecs = stopwatch.elapsed(MICROSECONDS) - matchingAtUSecs; - logComplete(operationName, matchedInUSecs, waitDuration, true); + complete(operationName, matchedInUSecs, waitDuration, true); matchedOperationContext.poller.pause(); try { output.put(matchedOperationContext); @@ -139,7 +133,7 @@ protected void iterate() throws InterruptedException { } MatchOperationListener listener = new MatchOperationListener(operationContext, stopwatch); try { - logStart(); + start(); workerContext.match(listener); } finally { if (!listener.wasMatched()) { diff --git a/src/main/java/build/buildfarm/worker/OperationContext.java b/src/main/java/build/buildfarm/worker/OperationContext.java index e07649d03a..027d364afd 100644 --- a/src/main/java/build/buildfarm/worker/OperationContext.java +++ b/src/main/java/build/buildfarm/worker/OperationContext.java @@ -19,16 +19,18 @@ import build.bazel.remote.execution.v2.ExecuteResponse; import build.buildfarm.common.Poller; import build.buildfarm.v1test.QueueEntry; +import build.buildfarm.v1test.Tree; import com.google.longrunning.Operation; import java.nio.file.Path; -final class OperationContext { +public final class OperationContext { final ExecuteResponse.Builder executeResponse; final Operation operation; final Poller poller; final Path execDir; final Action action; final Command command; + final Tree tree; final QueueEntry queueEntry; private OperationContext( @@ -38,6 +40,7 @@ private OperationContext( Path execDir, Action action, Command command, + Tree tree, QueueEntry queueEntry) { this.executeResponse = executeResponse; this.operation = operation; @@ -45,6 +48,7 @@ private OperationContext( this.execDir = execDir; this.action = action; this.command = command; + this.tree = tree; this.queueEntry = queueEntry; } @@ -55,6 +59,7 @@ public static class Builder { private Path execDir; private Action action; private Command command; + private Tree tree; private QueueEntry queueEntry; private Builder( @@ -64,6 +69,7 @@ private Builder( Path execDir, Action action, Command command, + Tree tree, QueueEntry queueEntry) { this.executeResponse = 
executeResponse; this.operation = operation; @@ -71,14 +77,10 @@ private Builder( this.execDir = execDir; this.action = action; this.command = command; + this.tree = tree; this.queueEntry = queueEntry; } - public Builder setExecuteResponseBuilder(ExecuteResponse.Builder executeResponse) { - this.executeResponse = executeResponse; - return this; - } - public Builder setOperation(Operation operation) { this.operation = operation; return this; @@ -104,6 +106,11 @@ public Builder setCommand(Command command) { return this; } + public Builder setTree(Tree tree) { + this.tree = tree; + return this; + } + public Builder setQueueEntry(QueueEntry queueEntry) { this.queueEntry = queueEntry; return this; @@ -111,22 +118,24 @@ public Builder setQueueEntry(QueueEntry queueEntry) { public OperationContext build() { return new OperationContext( - executeResponse, operation, poller, execDir, action, command, queueEntry); + executeResponse, operation, poller, execDir, action, command, tree, queueEntry); } } public static Builder newBuilder() { return new Builder( - /* executeResponse=*/ ExecuteResponse.newBuilder(), - /* operation=*/ null, - /* poller=*/ null, - /* execDir=*/ null, - /* action=*/ null, - /* command=*/ null, - /* queueEntry=*/ null); + /* executeResponse= */ ExecuteResponse.newBuilder(), + /* operation= */ null, + /* poller= */ null, + /* execDir= */ null, + /* action= */ null, + /* command= */ null, + /* tree= */ null, + /* queueEntry= */ null); } public Builder toBuilder() { - return new Builder(executeResponse, operation, poller, execDir, action, command, queueEntry); + return new Builder( + executeResponse, operation, poller, execDir, action, command, tree, queueEntry); } } diff --git a/src/main/java/build/buildfarm/worker/OutputDirectory.java b/src/main/java/build/buildfarm/worker/OutputDirectory.java index 6245553803..caf77b273e 100644 --- a/src/main/java/build/buildfarm/worker/OutputDirectory.java +++ b/src/main/java/build/buildfarm/worker/OutputDirectory.java @@ -161,10 +161,15 @@ private static OutputDirectory parseDirectories(Iterable o Iterables.addAll(sortedOutputDirs, outputDirs); Collections.sort(sortedOutputDirs); + String currentOutputDir = ""; Builder currentBuilder = builder; String prefix = "/"; for (OutputDirectoryEntry entry : sortedOutputDirs) { String outputDir = entry.outputDirectory; + if (outputDir == currentOutputDir) { + continue; + } + currentOutputDir = outputDir; while (!outputDir.startsWith(prefix)) { currentBuilder = stack.pop(); int upPathSeparatorIndex = prefix.lastIndexOf('/', prefix.length() - 2); diff --git a/src/main/java/build/buildfarm/worker/Pipeline.java b/src/main/java/build/buildfarm/worker/Pipeline.java index 0ed4e7b40a..a753cf3fe1 100644 --- a/src/main/java/build/buildfarm/worker/Pipeline.java +++ b/src/main/java/build/buildfarm/worker/Pipeline.java @@ -14,7 +14,6 @@ package build.buildfarm.worker; -import com.google.common.util.concurrent.SettableFuture; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -25,33 +24,26 @@ @Log public class Pipeline { private final Map stageThreads; - private final PipelineStageThreadGroup stageThreadGroup; private final Map stageClosePriorities; private Thread joiningThread = null; private boolean closing = false; + // FIXME ThreadGroup? 
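With PipelineStageThreadGroup and the uncaught-exception future removed, crash handling moves into PipelineStage.run() itself (see its new retry loop later in this patch), and stages become plain Threads, hence the FIXME above. For reference, a sketch of how a worker wires the simplified API; the stage variables and close priorities are illustrative, not prescribed by this patch:

Pipeline pipeline = new Pipeline();
pipeline.add(matchStage, 4); // stopMatchingOperations() finds and interrupts this stage
pipeline.add(inputFetchStage, 3);
pipeline.add(executeActionStage, 2);
pipeline.add(reportResultStage, 1); // higher close priority is closed first in join()
pipeline.start(); // no longer takes a SettableFuture for uncaught exceptions
// ... on shutdown:
pipeline.close();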
+ public Pipeline() { stageThreads = new HashMap<>(); stageClosePriorities = new HashMap<>(); - stageThreadGroup = new PipelineStageThreadGroup(); } public void add(PipelineStage stage, int closePriority) { - stageThreads.put(stage, new Thread(stageThreadGroup, stage, stage.name())); + stageThreads.put(stage, new Thread(stage)); if (closePriority < 0) { throw new IllegalArgumentException("closePriority cannot be negative"); } stageClosePriorities.put(stage, closePriority); } - /** - * Start the pipeline. - * - *
- * <p>
You can provide callback which is invoked when any stage has an uncaught exception, for - * instance to shutdown the worker gracefully - */ - public void start(SettableFuture uncaughtExceptionFuture) { - stageThreadGroup.setUncaughtExceptionFuture(uncaughtExceptionFuture); + public void start() { for (Thread stageThread : stageThreads.values()) { stageThread.start(); } @@ -70,9 +62,12 @@ public void close() throws InterruptedException { /** Inform MatchStage to stop matching or picking up new Operations from queue. */ public void stopMatchingOperations() { - for (PipelineStage stage : stageClosePriorities.keySet()) { + for (Map.Entry entry : stageThreads.entrySet()) { + PipelineStage stage = entry.getKey(); if (stage instanceof MatchStage) { - ((MatchStage) stage).prepareForGracefulShutdown(); + MatchStage matchStage = (MatchStage) stage; + matchStage.prepareForGracefulShutdown(); + entry.getValue().interrupt(); return; } } @@ -143,15 +138,17 @@ private void join(boolean closeStage) throws InterruptedException { } } if (stageToClose != null && !stageToClose.isClosed()) { - log.log(Level.FINE, "Closing stage at priority " + maxPriority); + log.log(Level.FINER, "Closing stage " + stageToClose + " at priority " + maxPriority); stageToClose.close(); } } + boolean longStageWait = !closeStage; for (Map.Entry stageThread : stageThreads.entrySet()) { PipelineStage stage = stageThread.getKey(); Thread thread = stageThread.getValue(); try { - thread.join(closeStage ? 1 : 1000); + // 0 is wait forever, no instant wait + thread.join(longStageWait ? 1000 : 1); } catch (InterruptedException e) { if (!closeStage) { synchronized (this) { @@ -166,7 +163,7 @@ private void join(boolean closeStage) throws InterruptedException { if (!thread.isAlive()) { log.log( - Level.FINE, + Level.FINER, "Stage " + stage.name() + " has exited at priority " @@ -181,8 +178,8 @@ private void join(boolean closeStage) throws InterruptedException { + stageClosePriorities.get(stage)); thread.interrupt(); } + longStageWait = false; } - closeStage = false; for (PipelineStage stage : inactiveStages) { synchronized (this) { stageThreads.remove(stage); diff --git a/src/main/java/build/buildfarm/worker/PipelineStage.java b/src/main/java/build/buildfarm/worker/PipelineStage.java index da34172a2a..b33cf9ea5e 100644 --- a/src/main/java/build/buildfarm/worker/PipelineStage.java +++ b/src/main/java/build/buildfarm/worker/PipelineStage.java @@ -14,11 +14,13 @@ package build.buildfarm.worker; +import static java.lang.String.format; import static java.util.concurrent.TimeUnit.MICROSECONDS; import com.google.common.base.Stopwatch; import java.util.logging.Level; import java.util.logging.Logger; +import javax.annotation.Nullable; public abstract class PipelineStage implements Runnable { protected final String name; @@ -30,6 +32,7 @@ public abstract class PipelineStage implements Runnable { private volatile boolean closed = false; private Thread tickThread = null; private boolean tickCancelledFlag = false; + private String operationName = null; PipelineStage( String name, WorkerContext workerContext, PipelineStage output, PipelineStage error) { @@ -39,28 +42,60 @@ public abstract class PipelineStage implements Runnable { this.error = error; } - private void runInterruptible() throws InterruptedException { + public String getName() { + return name; + } + + protected void runInterruptible() throws InterruptedException { while (!output.isClosed() || isClaimed()) { iterate(); } } + public @Nullable String getOperationName() { + return operationName; + 
} + @Override public void run() { - try { - runInterruptible(); - } catch (InterruptedException e) { - // ignore - } finally { - boolean wasInterrupted = Thread.interrupted(); + boolean keepRunningStage = true; + while (keepRunningStage) { try { - close(); - } finally { - if (wasInterrupted) { - Thread.currentThread().interrupt(); - } + runInterruptible(); + + // If the run finishes without exception, the stage can also stop running. + keepRunningStage = false; + + } catch (Exception e) { + keepRunningStage = decideTermination(e); } } + + close(); + } + + /** + * @brief When the stage has an uncaught exception, this method determines whether the pipeline + * stage should terminate. + * @details This is a customization of the pipeline stage to allow logging exceptions but keeping + * the pipeline stage running. + * @return Whether the stage should terminate or continue running. + */ + private boolean decideTermination(Exception e) { + // This is a normal way for the pipeline stage to terminate. + // If an interrupt is received, there is no reason to continue the pipeline stage. + if (e instanceof InterruptedException) { + getLogger() + .log(Level.INFO, String.format("%s::run(): stage terminated due to interrupt", name)); + return false; + } + + // On the other hand, this is an abnormal way for a pipeline stage to terminate. + // For robustness of the distributed system, we may want to log the error but continue the + // pipeline stage. + getLogger() + .log(Level.SEVERE, String.format("%s::run(): stage terminated due to exception", name), e); + return true; } public String name() { @@ -90,7 +125,7 @@ protected void iterate() throws InterruptedException { Stopwatch stopwatch = Stopwatch.createUnstarted(); try { operationContext = take(); - logStart(operationContext.operation.getName()); + start(operationContext.operation.getName()); stopwatch.start(); boolean valid = false; tickThread = Thread.currentThread(); @@ -124,35 +159,38 @@ protected void iterate() throws InterruptedException { } after(operationContext); long usecs = stopwatch.elapsed(MICROSECONDS); - logComplete( - operationContext.operation.getName(), usecs, stallUSecs, nextOperationContext != null); + complete(operationName, usecs, stallUSecs, nextOperationContext != null); + operationName = null; } private String logIterateId(String operationName) { - return String.format("%s::iterate(%s)", name, operationName); + return format("%s::iterate(%s)", name, operationName); } - protected void logStart() { - logStart(""); + protected void start() { + start(""); } - protected void logStart(String operationName) { - logStart(operationName, "Starting"); + protected void start(String operationName) { + start(operationName, "Starting"); } - protected void logStart(String operationName, String message) { - getLogger().log(Level.FINE, String.format("%s: %s", logIterateId(operationName), message)); + protected void start(String operationName, String message) { + // TODO to unary stage + this.operationName = operationName; + getLogger().log(Level.FINER, format("%s: %s", logIterateId(operationName), message)); } - protected void logComplete(String operationName, long usecs, long stallUSecs, boolean success) { - logComplete(operationName, usecs, stallUSecs, success ? "Success" : "Failed"); + protected void complete(String operationName, long usecs, long stallUSecs, boolean success) { + complete(operationName, usecs, stallUSecs, success ? 
"Success" : "Failed"); } - protected void logComplete(String operationName, long usecs, long stallUSecs, String status) { + protected void complete(String operationName, long usecs, long stallUSecs, String status) { + this.operationName = operationName; getLogger() .log( - Level.FINE, - String.format( + Level.FINER, + format( "%s: %g ms (%g ms stalled) %s", logIterateId(operationName), usecs / 1000.0f, stallUSecs / 1000.0f, status)); } @@ -208,7 +246,7 @@ public PipelineStage error() { public static class NullStage extends PipelineStage { public NullStage() { - this(/* workerContext=*/ null, /* output=*/ null); + this(/* workerContext= */ null, /* output= */ null); } public NullStage(WorkerContext workerContext, PipelineStage output) { diff --git a/src/main/java/build/buildfarm/worker/ReportResultStage.java b/src/main/java/build/buildfarm/worker/ReportResultStage.java index d7bf8eeba2..a0ae1968b6 100644 --- a/src/main/java/build/buildfarm/worker/ReportResultStage.java +++ b/src/main/java/build/buildfarm/worker/ReportResultStage.java @@ -174,8 +174,7 @@ private OperationContext reportPolled(OperationContext operationContext) .build(); Operation completedOperation = - operation - .toBuilder() + operation.toBuilder() .setDone(true) .setMetadata(Any.pack(completedMetadata)) .setResponse(Any.pack(executeResponse)) diff --git a/src/main/java/build/buildfarm/worker/SuperscalarPipelineStage.java b/src/main/java/build/buildfarm/worker/SuperscalarPipelineStage.java index 64636d72c2..192835455c 100644 --- a/src/main/java/build/buildfarm/worker/SuperscalarPipelineStage.java +++ b/src/main/java/build/buildfarm/worker/SuperscalarPipelineStage.java @@ -14,17 +14,21 @@ package build.buildfarm.worker; +import java.util.HashSet; +import java.util.Set; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; import java.util.concurrent.TimeUnit; import java.util.logging.Level; -abstract class SuperscalarPipelineStage extends PipelineStage { +public abstract class SuperscalarPipelineStage extends PipelineStage { protected final int width; @SuppressWarnings("rawtypes") protected final BlockingQueue claims; + protected Set operationNames = new HashSet<>(); + private volatile boolean catastrophic = false; // ensure that only a single claim waits for available slots for core count @@ -46,6 +50,39 @@ public SuperscalarPipelineStage( protected abstract int claimsRequired(OperationContext operationContext); + @Override + public String getOperationName() { + throw new UnsupportedOperationException("use getOperationNames on superscalar stages"); + } + + public int getWidth() { + return width; + } + + public abstract int getSlotUsage(); + + public Iterable getOperationNames() { + synchronized (operationNames) { + return new HashSet(operationNames); + } + } + + @Override + protected void start(String operationName, String message) { + synchronized (operationNames) { + operationNames.add(operationName); + } + super.start(operationName, message); + } + + @Override + protected void complete(String operationName, long usecs, long stallUSecs, String status) { + super.complete(operationName, usecs, stallUSecs, status); + synchronized (operationNames) { + operationNames.remove(operationName); + } + } + synchronized void waitForReleaseOrCatastrophe(BlockingQueue queue) { boolean interrupted = false; while (!catastrophic && isClaimed()) { @@ -58,7 +95,7 @@ synchronized void waitForReleaseOrCatastrophe(BlockingQueue qu releaseClaim(operationContext.operation.getName(), 
claimsRequired(operationContext)); } else { try { - wait(/* timeout=*/ 10); + wait(/* timeout= */ 10); } catch (InterruptedException e) { interrupted = Thread.interrupted() || interrupted; // ignore, we will throw it eventually diff --git a/src/main/java/build/buildfarm/worker/WorkerContext.java b/src/main/java/build/buildfarm/worker/WorkerContext.java index 873ad1b938..70060acea1 100644 --- a/src/main/java/build/buildfarm/worker/WorkerContext.java +++ b/src/main/java/build/buildfarm/worker/WorkerContext.java @@ -130,4 +130,6 @@ IOResource limitExecution( int commandExecutionClaims(Command command); ResourceLimits commandExecutionSettings(Command command); + + void returnLocalResources(QueueEntry queueEntry); } diff --git a/src/main/java/build/buildfarm/worker/cgroup/Group.java b/src/main/java/build/buildfarm/worker/cgroup/Group.java index 53fd9c8cbd..d8906ed344 100644 --- a/src/main/java/build/buildfarm/worker/cgroup/Group.java +++ b/src/main/java/build/buildfarm/worker/cgroup/Group.java @@ -27,7 +27,7 @@ @Log public final class Group { - private static final Group root = new Group(/* name=*/ null, /* parent=*/ null); + private static final Group root = new Group(/* name= */ null, /* parent= */ null); private static final Path rootPath = Paths.get("/sys/fs/cgroup"); private @Nullable final String name; @@ -127,7 +127,8 @@ private List getPids(String controllerName) throws IOException { @SuppressWarnings("StatementWithEmptyBody") public void killUntilEmpty(String controllerName) throws IOException { - while (!killAllProcs(controllerName)) ; + while (!killAllProcs(controllerName)) + ; } void create(String controllerName) throws IOException { diff --git a/src/main/java/build/buildfarm/worker/persistent/BUILD b/src/main/java/build/buildfarm/worker/persistent/BUILD new file mode 100644 index 0000000000..b476e0ebb2 --- /dev/null +++ b/src/main/java/build/buildfarm/worker/persistent/BUILD @@ -0,0 +1,31 @@ +java_library( + name = "persistent", + srcs = glob(["*.java"]), + plugins = ["//src/main/java/build/buildfarm/common:lombok"], + visibility = ["//visibility:public"], + deps = [ + "//persistentworkers/src/main/java/persistent/bazel:bazel-persistent-workers", + "//persistentworkers/src/main/java/persistent/common:persistent-common", + "//persistentworkers/src/main/java/persistent/common/util", + "//persistentworkers/src/main/protobuf:worker_protocol_java_proto", + "//src/main/java/build/buildfarm/common", + "//src/main/java/build/buildfarm/worker/resources", + "//src/main/java/build/buildfarm/worker/util", + "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", + "@maven//:com_google_api_grpc_proto_google_common_protos", + "@maven//:com_google_guava_guava", + "@maven//:com_google_protobuf_protobuf_java", + "@maven//:com_google_protobuf_protobuf_java_util", + "@maven//:commons_io_commons_io", + "@maven//:io_grpc_grpc_api", + "@maven//:io_grpc_grpc_context", + "@maven//:io_grpc_grpc_core", + "@maven//:io_grpc_grpc_netty", + "@maven//:io_grpc_grpc_protobuf", + "@maven//:io_grpc_grpc_stub", + "@maven//:io_prometheus_simpleclient", + "@maven//:org_apache_commons_commons_compress", + "@maven//:org_jetbrains_annotations", + "@maven//:org_projectlombok_lombok", + ], +) diff --git a/src/main/java/build/buildfarm/worker/persistent/FileAccessUtils.java b/src/main/java/build/buildfarm/worker/persistent/FileAccessUtils.java new file mode 100644 index 0000000000..ca81384e16 --- /dev/null +++ b/src/main/java/build/buildfarm/worker/persistent/FileAccessUtils.java @@ -0,0 +1,171 @@ +// Copyright 
2023 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package build.buildfarm.worker.persistent;
+
+import static java.nio.file.StandardCopyOption.COPY_ATTRIBUTES;
+import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;
+
+import com.google.common.collect.ImmutableSet;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.attribute.PosixFilePermission;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Supplier;
+import lombok.extern.java.Log;
+
+/**
+ * Utility for concurrent move/copy of files. Can be extended in the future to (sym)linking if we
+ * need the performance.
+ */
+@Log
+public final class FileAccessUtils {
+  // singleton class with only static methods
+  private FileAccessUtils() {}
+
+  public static Path addPosixOwnerWrite(Path absPath) throws IOException {
+    Set<PosixFilePermission> perms = Files.getPosixFilePermissions(absPath);
+
+    ImmutableSet<PosixFilePermission> permsWithWrite =
+        ImmutableSet.<PosixFilePermission>builder()
+            .addAll(perms)
+            .add(PosixFilePermission.OWNER_WRITE)
+            .build();
+
+    return Files.setAttribute(absPath, "posix:permissions", permsWithWrite);
+  }
+
+  private static final ConcurrentHashMap<Path, PathLock> fileLocks = new ConcurrentHashMap<>();
+
+  // Used here as a simple lock for locking "files" (paths)
+  private static class PathLock {
+    // Not used elsewhere
+    private PathLock() {}
+  }
+
+  /**
+   * Copies a file, creating necessary directories, replacing existing files. The resulting file is
+   * set to be writeable, and we throw if we cannot set that. Thread-safe (within a process) against
+   * writes to the same path.
+   *
+   * @param from the source file, which must exist
+   * @param to the destination path
+   * @throws IOException if the copy fails or the destination cannot be made writeable
+   */
+  public static void copyFile(Path from, Path to) throws IOException {
+    Path absTo = to.toAbsolutePath();
+    log.finer("copyFile: " + from + " to " + absTo);
+    if (!Files.exists(from)) {
+      throw new IOException("copyFile: source file doesn't exist: " + from);
+    }
+    IOException ioException =
+        writeFileSafe(
+            // lock on the absolute path, consistent with moveFile/deleteFileIfExists
+            absTo,
+            () -> {
+              try {
+                Files.copy(from, absTo, REPLACE_EXISTING, COPY_ATTRIBUTES);
+                addPosixOwnerWrite(absTo);
+                return null;
+              } catch (IOException e) {
+                return new IOException("copyFile() could not set writeable: " + absTo, e);
+              }
+            });
+    if (ioException != null) {
+      throw ioException;
+    }
+  }
+
+  /**
+   * Moves a file, creating necessary directories, replacing existing files. The resulting file is
+   * set to be writeable, and we throw if we cannot set that. Thread-safe against writes to the same
+   * path.
+   *
+   * @param from the source file, which must exist
+   * @param to the destination path
+   * @throws IOException if the move fails or the destination cannot be made writeable
+   */
+  public static void moveFile(Path from, Path to) throws IOException {
+    Path absTo = to.toAbsolutePath();
+    log.finer("moveFile: " + from + " to " + absTo);
+    if (!Files.exists(from)) {
+      throw new IOException("moveFile: source file doesn't exist: " + from);
+    }
+    IOException ioException =
+        writeFileSafe(
+            absTo,
+            () -> {
+              try {
+                Files.move(from, absTo, REPLACE_EXISTING);
+                addPosixOwnerWrite(absTo);
+                return null;
+              } catch (IOException e) {
+                return new IOException("moveFile() could not set writeable: " + absTo, e);
+              }
+            });
+    if (ioException != null) {
+      throw ioException;
+    }
+  }
+
+  /**
+   * Deletes a file; thread-safe against writes to the same path.
+   *
+   * @param toDelete the file to delete, if it exists
+   * @throws IOException if the deletion fails
+   */
+  public static void deleteFileIfExists(Path toDelete) throws IOException {
+    Path absTo = toDelete.toAbsolutePath();
+    PathLock toLock = fileLock(absTo);
+    synchronized (toLock) {
+      try {
+        Files.deleteIfExists(absTo);
+      } finally {
+        fileLocks.remove(absTo);
+      }
+    }
+  }
+
+  /**
+   * Thread-safe (not multi-process-safe) wrapper for locking paths before a write operation.
+   *
+   *
<p>
This method will create necessary parent directories. + * + *
<p>
It is up to the write operation to specify whether or not to overwrite existing files. + */ + @SuppressWarnings("PMD.UnnecessaryLocalBeforeReturn") + private static IOException writeFileSafe(Path absTo, Supplier writeOp) { + PathLock toLock = fileLock(absTo); + synchronized (toLock) { + try { + // If 'absTo' is a symlink, checks if its target file exists + Files.createDirectories(absTo.getParent()); + return writeOp.get(); + } catch (IOException e) { + // PMD will complain about UnnecessaryLocalBeforeReturn + // In this case, it is necessary to catch the exception + return e; + } finally { + // Clean up to prevent too many locks. + fileLocks.remove(absTo); + } + } + } + + // "Logical" file lock + private static PathLock fileLock(Path writeTo) { + return fileLocks.computeIfAbsent(writeTo, k -> new PathLock()); + } +} diff --git a/src/main/java/build/buildfarm/worker/persistent/Keymaker.java b/src/main/java/build/buildfarm/worker/persistent/Keymaker.java new file mode 100644 index 0000000000..edfaaf23c3 --- /dev/null +++ b/src/main/java/build/buildfarm/worker/persistent/Keymaker.java @@ -0,0 +1,112 @@ +// Copyright 2023 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package build.buildfarm.worker.persistent; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSortedMap; +import com.google.common.hash.HashCode; +import com.google.common.hash.Hasher; +import com.google.common.hash.Hashing; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.util.Objects; +import java.util.SortedMap; +import persistent.bazel.client.PersistentWorker; +import persistent.bazel.client.WorkerKey; + +/** Much of the logic (hashing) is from Bazel itself (private library/methods, i.e. WorkerKey). 
*/ +public class Keymaker { + // Constructs a key with its worker tool input files being relative paths + public static WorkerKey make( + Path opRoot, + Path workRootsDir, + ImmutableList workerInitCmd, + ImmutableList workerInitArgs, + ImmutableMap workerEnv, + String executionName, + WorkerInputs workerFiles) { + // Cancellation not yet supported; can change in the future, + // Presumably, following how Bazel's own persistent workers work + boolean sandboxed = true; + boolean cancellable = false; + + Path workRoot = + calculateWorkRoot( + workRootsDir, + workerInitCmd, + workerInitArgs, + workerEnv, + executionName, + sandboxed, + cancellable); + Path toolsRoot = workRoot.resolve(PersistentWorker.TOOL_INPUT_SUBDIR); + + SortedMap hashedTools = workerFilesWithHashes(workerFiles); + HashCode combinedToolsHash = workerFilesCombinedHash(toolsRoot, hashedTools); + + return new WorkerKey( + workerInitCmd, + workerInitArgs, + workerEnv, + workRoot, + executionName, + combinedToolsHash, + hashedTools, + sandboxed, + cancellable); + } + + // Hash of a subset of the WorkerKey + private static Path calculateWorkRoot( + Path workRootsDir, + ImmutableList workerInitCmd, + ImmutableList workerInitArgs, + ImmutableMap workerEnv, + String executionName, + boolean sandboxed, + boolean cancellable) { + int workRootId = Objects.hash(workerInitCmd, workerInitArgs, workerEnv, sandboxed, cancellable); + String workRootDirName = "work-root_" + executionName + "_" + workRootId; + return workRootsDir.resolve(workRootDirName); + } + + private static ImmutableSortedMap workerFilesWithHashes( + WorkerInputs workerFiles) { + ImmutableSortedMap.Builder workerFileHashBuilder = + ImmutableSortedMap.naturalOrder(); + + for (Path opPath : workerFiles.opToolInputs) { + Path relPath = workerFiles.opRoot.relativize(opPath); + + HashCode toolInputHash = HashCode.fromBytes(workerFiles.digestFor(opPath).toByteArray()); + workerFileHashBuilder.put(relPath, toolInputHash); + } + + return workerFileHashBuilder.build(); + } + + // Even though we hash the toolsRoot-resolved path, it doesn't exist yet. + private static HashCode workerFilesCombinedHash( + Path toolsRoot, SortedMap hashedTools) { + Hasher hasher = Hashing.sha256().newHasher(); + hashedTools.forEach( + (relPath, toolHash) -> { + hasher.putString(toolsRoot.resolve(relPath).toString(), StandardCharsets.UTF_8); + hasher.putBytes(toolHash.asBytes()); + }); + return hasher.hash(); + } +} diff --git a/src/main/java/build/buildfarm/worker/persistent/PersistentExecutor.java b/src/main/java/build/buildfarm/worker/persistent/PersistentExecutor.java new file mode 100644 index 0000000000..a96d678a03 --- /dev/null +++ b/src/main/java/build/buildfarm/worker/persistent/PersistentExecutor.java @@ -0,0 +1,268 @@ +// Copyright 2023 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
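As an illustration of the hashing scheme above: workerFilesCombinedHash folds the sorted (path, digest) pairs into a single SHA-256, so equal tool sets always produce the same worker key regardless of insertion order. A minimal, self-contained sketch using only Guava; the paths and digests are placeholders, not buildfarm API:

    import com.google.common.hash.Hasher;
    import com.google.common.hash.Hashing;
    import java.nio.charset.StandardCharsets;
    import java.util.SortedMap;
    import java.util.TreeMap;

    class CombinedToolHashSketch {
      // Folds sorted (path, content-hash) pairs into one SHA-256; since a
      // SortedMap iterates in sorted order, equal tool sets always yield the
      // same combined key.
      static String combine(SortedMap<String, byte[]> toolHashes) {
        Hasher hasher = Hashing.sha256().newHasher();
        toolHashes.forEach(
            (path, hash) -> {
              hasher.putString(path, StandardCharsets.UTF_8);
              hasher.putBytes(hash);
            });
        return hasher.hash().toString();
      }

      public static void main(String[] args) {
        SortedMap<String, byte[]> tools = new TreeMap<>();
        tools.put("external/tool_a", new byte[] {1, 2});
        tools.put("external/tool_b", new byte[] {3, 4});
        System.out.println(combine(tools)); // stable across runs and insert order
      }
    }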
+ +package build.buildfarm.worker.persistent; + +import build.bazel.remote.execution.v2.ActionResult; +import build.buildfarm.worker.resources.ResourceLimits; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.devtools.build.lib.worker.WorkerProtocol.Input; +import com.google.devtools.build.lib.worker.WorkerProtocol.WorkRequest; +import com.google.devtools.build.lib.worker.WorkerProtocol.WorkResponse; +import com.google.protobuf.ByteString; +import com.google.protobuf.Duration; +import com.google.rpc.Code; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Map; +import java.util.logging.Level; +import java.util.stream.Collectors; +import lombok.extern.java.Log; +import persistent.bazel.client.WorkerKey; + +/** + * Executes an Action like Executor/DockerExecutor, writing to ActionResult. + * + *
<p>
Currently has special code for discriminating between Javac/Scalac, and other persistent + * workers, likely for debugging purposes, but need to revisit. (Can't remember fully since it was + * so long ago!) + */ +@Log +public class PersistentExecutor { + private static final ProtoCoordinator coordinator = + ProtoCoordinator.ofCommonsPool(getMaxWorkersPerKey()); + + // TODO load from config (i.e. {worker_root}/persistent) + public static final Path defaultWorkRootsDir = Paths.get("/tmp/worker/persistent/"); + + public static final String PERSISTENT_WORKER_FLAG = "--persistent_worker"; + + // TODO Revisit hardcoded actions + static final String JAVABUILDER_JAR = + "external/remote_java_tools/java_tools/JavaBuilder_deploy.jar"; + + private static final String SCALAC_EXEC_NAME = "Scalac"; + private static final String JAVAC_EXEC_NAME = "JavaBuilder"; + + // How many workers can exist at once for a given WorkerKey + // There may be multiple WorkerKeys per mnemonic, + // e.g. if builds are run with different tool fingerprints + private static final int defaultMaxWorkersPerKey = 6; + + private static int getMaxWorkersPerKey() { + try { + return Integer.parseInt(System.getenv("BUILDFARM_MAX_WORKERS_PER_KEY")); + } catch (Exception ignored) { + log.info( + "Could not get env var BUILDFARM_MAX_WORKERS_PER_KEY; defaulting to " + + defaultMaxWorkersPerKey); + } + return defaultMaxWorkersPerKey; + } + + /** + * 1) Parses action inputs into tool inputs and request inputs 2) Makes the WorkerKey 3) Loads the + * tool inputs, if needed, into the WorkerKey tool inputs dir 4) Runs the work request on its + * Coordinator, passing it the required context 5) Passes output to the resultBuilder + */ + public static Code runOnPersistentWorker( + String persistentWorkerInitCmd, + WorkFilesContext context, + String operationName, + ImmutableList argsList, + ImmutableMap envVars, + ResourceLimits limits, + Duration timeout, + Path workRootsDir, + ActionResult.Builder resultBuilder) + throws IOException { + //// Pull out persistent worker start command from the overall action request + + log.log(Level.FINE, "executeCommandOnPersistentWorker[" + operationName + "]"); + + ImmutableList initCmd = parseInitCmd(persistentWorkerInitCmd, argsList); + + String executionName = getExecutionName(argsList); + if (executionName.isEmpty()) { + log.log(Level.SEVERE, "Invalid Argument: " + argsList); + return Code.INVALID_ARGUMENT; + } + + // TODO revisit why this was necessary in the first place + // (@wiwa) I believe the reason has to do with JavaBuilder workers not relying on env vars, + // as compared to rules_scala, only reading info from the argslist of each command. + // That would mean the Java worker keys should be invariant to the env vars we see. 
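A toy sketch of the invariance claim in the comment above: if the env map is left out of the key's hash for JavaBuilder, two actions that differ only in their environment land on the same worker pool. Names here are hypothetical; the real key is built by Keymaker:

    import java.util.List;
    import java.util.Map;
    import java.util.Objects;

    class EnvInvarianceSketch {
      // Mirrors calculateWorkRoot's use of Objects.hash, dropping the env map
      // when the tool is known to ignore its environment.
      static int keyId(List<String> cmd, Map<String, String> env, boolean envInvariant) {
        return envInvariant ? Objects.hash(cmd) : Objects.hash(cmd, env);
      }

      public static void main(String[] args) {
        List<String> cmd = List.of("java", "-jar", "JavaBuilder_deploy.jar");
        int a = keyId(cmd, Map.of("LANG", "C"), true);
        int b = keyId(cmd, Map.of("LANG", "en_US.UTF-8"), true);
        System.out.println(a == b); // true: same pool despite differing env
      }
    }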
+ ImmutableMap env; + if (executionName.equals(JAVAC_EXEC_NAME)) { + env = ImmutableMap.of(); + } else { + env = envVars; + } + + int requestArgsIdx = initCmd.size(); + ImmutableList workerExecCmd = initCmd; + ImmutableList workerInitArgs = + ImmutableList.builder().add(PERSISTENT_WORKER_FLAG).build(); + ImmutableList requestArgs = argsList.subList(requestArgsIdx, argsList.size()); + + //// Make Key + + WorkerInputs workerFiles = WorkerInputs.from(context, requestArgs); + + Path binary = Paths.get(workerExecCmd.get(0)); + if (!workerFiles.containsTool(binary) && !binary.isAbsolute()) { + throw new IllegalArgumentException( + "Binary wasn't a tool input nor an absolute path: " + binary); + } + + WorkerKey key = + Keymaker.make( + context.opRoot, + workRootsDir, + workerExecCmd, + workerInitArgs, + env, + executionName, + workerFiles); + + coordinator.copyToolInputsIntoWorkerToolRoot(key, workerFiles); + + //// Make request + + // Inputs should be relative paths (if they are from operation root) + ImmutableList.Builder reqInputsBuilder = ImmutableList.builder(); + + for (Map.Entry opInput : workerFiles.allInputs.entrySet()) { + Input relInput = opInput.getValue(); + Path opPath = opInput.getKey(); + if (opPath.startsWith(workerFiles.opRoot)) { + relInput = + relInput.toBuilder().setPath(workerFiles.opRoot.relativize(opPath).toString()).build(); + } + reqInputsBuilder.add(relInput); + } + ImmutableList reqInputs = reqInputsBuilder.build(); + + WorkRequest request = + WorkRequest.newBuilder() + .addAllArguments(requestArgs) + .addAllInputs(reqInputs) + .setRequestId(0) + .build(); + + RequestCtx requestCtx = new RequestCtx(request, context, workerFiles, timeout); + + //// Run request + //// Required file operations (in/out) are the responsibility of the coordinator + + log.log(Level.FINE, "Request with key: " + key); + WorkResponse response; + String stdErr = ""; + try { + ResponseCtx fullResponse = coordinator.runRequest(key, requestCtx); + + response = fullResponse.response; + stdErr = fullResponse.errorString; + } catch (Exception e) { + String debug = + "\n\tRequest.initCmd: " + + workerExecCmd + + "\n\tRequest.initArgs: " + + workerInitArgs + + "\n\tRequest.requestArgs: " + + request.getArgumentsList(); + String msg = "Exception while running request: " + e + debug + "\n\n"; + + log.log(Level.SEVERE, msg, e); + + response = + WorkResponse.newBuilder() + .setOutput(msg) + .setExitCode(-1) // incomplete + .build(); + } + + //// Set results + + String responseOut = response.getOutput(); + log.log(Level.FINE, "WorkResponse.output: " + responseOut); + + int exitCode = response.getExitCode(); + resultBuilder + .setExitCode(exitCode) + .setStdoutRaw(response.getOutputBytes()) + .setStderrRaw(ByteString.copyFrom(stdErr, StandardCharsets.UTF_8)); + + if (exitCode == 0) { + return Code.OK; + } + + log.severe( + "PersistentExecutor.runOnPersistentWorker Failed with code: " + + exitCode + + "\n" + + responseOut + + "\n" + + executionName + + " inputs:\n" + + ImmutableList.copyOf( + reqInputs.stream().map(Input::getPath).collect(Collectors.toList()))); + return Code.FAILED_PRECONDITION; + } + + private static ImmutableList parseInitCmd(String cmdStr, ImmutableList argsList) { + if (!cmdStr.endsWith(PERSISTENT_WORKER_FLAG)) { + throw new IllegalArgumentException( + "Persistent Worker request must contain " + + PERSISTENT_WORKER_FLAG + + "\nGot: parseInitCmd[" + + cmdStr + + "]" + + "\n" + + argsList); + } + + String cmd = + cmdStr.trim().substring(0, (cmdStr.length() - PERSISTENT_WORKER_FLAG.length()) 
- 1); + + // Parse init command into list of space-separated words, without the persistent worker flag + ImmutableList.Builder initCmdBuilder = ImmutableList.builder(); + for (String s : argsList) { + if (cmd.isEmpty()) { + break; + } + cmd = cmd.substring(s.length()).trim(); + initCmdBuilder.add(s); + } + ImmutableList initCmd = initCmdBuilder.build(); + // Check that the persistent worker init command matches the action command + if (!initCmd.equals(argsList.subList(0, initCmd.size()))) { + throw new IllegalArgumentException("parseInitCmd?![" + initCmd + "]" + "\n" + argsList); + } + return initCmd; + } + + private static String getExecutionName(ImmutableList argsList) { + boolean isScalac = argsList.size() > 1 && argsList.get(0).endsWith("scalac/scalac"); + if (isScalac) { + return SCALAC_EXEC_NAME; + } else if (argsList.contains(JAVABUILDER_JAR)) { + return JAVAC_EXEC_NAME; + } + return "SomeOtherExec"; + } +} diff --git a/src/main/java/build/buildfarm/worker/persistent/ProtoCoordinator.java b/src/main/java/build/buildfarm/worker/persistent/ProtoCoordinator.java new file mode 100644 index 0000000000..e3c890225f --- /dev/null +++ b/src/main/java/build/buildfarm/worker/persistent/ProtoCoordinator.java @@ -0,0 +1,284 @@ +// Copyright 2023 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package build.buildfarm.worker.persistent; + +import static persistent.bazel.client.PersistentWorker.TOOL_INPUT_SUBDIR; + +import com.google.devtools.build.lib.worker.WorkerProtocol.WorkRequest; +import com.google.devtools.build.lib.worker.WorkerProtocol.WorkResponse; +import com.google.protobuf.Duration; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Timer; +import java.util.TimerTask; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; +import java.util.logging.Level; +import lombok.extern.java.Log; +import persistent.bazel.client.CommonsWorkerPool; +import persistent.bazel.client.PersistentWorker; +import persistent.bazel.client.WorkCoordinator; +import persistent.bazel.client.WorkerKey; +import persistent.bazel.client.WorkerSupervisor; + +/** + * Responsible for: 1) Initializing a new Worker's file environment correctly 2) pre-request + * requirements, e.g. ensuring tool input files 3) post-response requirements, i.e. 
putting output + * files in the right place + */ +@Log +public class ProtoCoordinator extends WorkCoordinator { + private static final String WORKER_INIT_LOG_SUFFIX = ".initargs.log"; + + private static final ConcurrentHashMap pendingReqs = + new ConcurrentHashMap<>(); + + private static final Timer timeoutScheduler = new Timer("persistent-worker-timeout", true); + + // Synchronize writes to the tool input directory per WorkerKey + // TODO: We only need a Set of WorkerKeys to synchronize on, but no ConcurrentHashSet + private static final ConcurrentHashMap toolInputSyncs = + new ConcurrentHashMap<>(); + + // Enforces locking on the same object given the same WorkerKey + private static WorkerKey keyLock(WorkerKey key) { + return toolInputSyncs.computeIfAbsent(key, k -> k); + } + + public ProtoCoordinator(CommonsWorkerPool workerPool) { + super(workerPool); + } + + public ProtoCoordinator(WorkerSupervisor supervisor, int maxWorkersPerKey) { + super(new CommonsWorkerPool(supervisor, maxWorkersPerKey)); + } + + // We copy tool inputs from the shared WorkerKey tools directory into our worker exec root, + // since there are multiple workers per key, + // and presumably there might be writes to tool inputs? + // Tool inputs which are absolute-paths (e.g. /usr/bin/...) are not affected + public static ProtoCoordinator ofCommonsPool(int maxWorkersPerKey) { + WorkerSupervisor loadToolsOnCreate = + new WorkerSupervisor() { + @Override + public PersistentWorker create(WorkerKey workerKey) throws Exception { + Path keyExecRoot = workerKey.getExecRoot(); + String workerExecDir = getUniqueSubdir(keyExecRoot); + Path workerExecRoot = keyExecRoot.resolve(workerExecDir); + copyToolsIntoWorkerExecRoot(workerKey, workerExecRoot); + + Path initArgsLogFile = workerExecRoot.resolve(workerExecDir + WORKER_INIT_LOG_SUFFIX); + if (!Files.exists(initArgsLogFile)) { + StringBuilder initArgs = new StringBuilder(); + for (String s : workerKey.getCmd()) { + initArgs.append(s); + initArgs.append('\n'); + } + for (String s : workerKey.getArgs()) { + initArgs.append(s); + initArgs.append('\n'); + } + + Files.write(initArgsLogFile, initArgs.toString().getBytes()); + } + + return new PersistentWorker(workerKey, workerExecDir); + } + }; + return new ProtoCoordinator(loadToolsOnCreate, maxWorkersPerKey); + } + + public void copyToolInputsIntoWorkerToolRoot(WorkerKey key, WorkerInputs workerFiles) + throws IOException { + WorkerKey lock = keyLock(key); + synchronized (lock) { + try { + // Move tool inputs as needed + Path workToolRoot = key.getExecRoot().resolve(PersistentWorker.TOOL_INPUT_SUBDIR); + for (Path opToolPath : workerFiles.opToolInputs) { + Path workToolPath = workerFiles.relativizeInput(workToolRoot, opToolPath); + if (!Files.exists(workToolPath)) { + workerFiles.copyInputFile(opToolPath, workToolPath); + } + } + } finally { + toolInputSyncs.remove(key); + } + } + } + + private static String getUniqueSubdir(Path workRoot) { + String uuid = UUID.randomUUID().toString(); + while (Files.exists(workRoot.resolve(uuid))) { + uuid = UUID.randomUUID().toString(); + } + return uuid; + } + + // copyToolInputsIntoWorkerToolRoot() should have been called before this. 
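Before the exec-root copy routine that follows below, note the per-key synchronization idiom used by keyLock() and copyToolInputsIntoWorkerToolRoot(): computeIfAbsent yields one canonical object per key for threads to synchronize on. A generic sketch, not the buildfarm types; as in the code above, removing the entry afterwards bounds map growth, at the cost of briefly admitting a fresh lock object while a waiter is still queued:

    import java.util.concurrent.ConcurrentHashMap;

    class PerKeyLockSketch {
      private final ConcurrentHashMap<String, Object> locks = new ConcurrentHashMap<>();

      void withLock(String key, Runnable action) {
        // computeIfAbsent returns one canonical object per key, so all threads
        // that see the same entry serialize on the same monitor.
        Object lock = locks.computeIfAbsent(key, k -> new Object());
        synchronized (lock) {
          try {
            action.run();
          } finally {
            locks.remove(key); // drop the entry to keep the map from growing
          }
        }
      }
    }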
+ private static void copyToolsIntoWorkerExecRoot(WorkerKey key, Path workerExecRoot) + throws IOException { + log.log(Level.FINE, "loadToolsIntoWorkerRoot() into: " + workerExecRoot); + + Path toolInputRoot = key.getExecRoot().resolve(TOOL_INPUT_SUBDIR); + for (Path relPath : key.getWorkerFilesWithHashes().keySet()) { + Path toolInputPath = toolInputRoot.resolve(relPath); + Path execRootPath = workerExecRoot.resolve(relPath); + + FileAccessUtils.copyFile(toolInputPath, execRootPath); + } + } + + @Override + public WorkRequest preWorkInit(WorkerKey key, RequestCtx request, PersistentWorker worker) + throws IOException { + PersistentWorker pendingWorker = pendingReqs.putIfAbsent(request, worker); + // null means that this request was not in pendingReqs (the expected case) + if (pendingWorker != null) { + if (pendingWorker != worker) { + throw new IllegalArgumentException( + "Already have a persistent worker on the job: " + request.request); + } else { + throw new IllegalArgumentException( + "Got the same request for the same worker while it's running: " + request.request); + } + } + startTimeoutTimer(request); + + // Symlinking should hypothetically be faster+leaner than copying inputs, but it's buggy. + copyNontoolInputs(request.workerInputs, worker.getExecRoot()); + + return request.request; + } + + // After the worker has finished, output files need to be visible in the operation directory + @Override + public ResponseCtx postWorkCleanup( + WorkResponse response, PersistentWorker worker, RequestCtx request) throws IOException { + pendingReqs.remove(request); + + if (response == null) { + throw new RuntimeException("postWorkCleanup: WorkResponse was null!"); + } + + if (response.getExitCode() == 0) { + try { + Path workerExecRoot = worker.getExecRoot(); + moveOutputsToOperationRoot(request.filesContext, workerExecRoot); + cleanUpNontoolInputs(request.workerInputs, workerExecRoot); + } catch (IOException e) { + throw logBadCleanup(request, e); + } + } + + return new ResponseCtx(response, worker.flushStdErr()); + } + + private IOException logBadCleanup(RequestCtx request, IOException e) { + WorkFilesContext context = request.filesContext; + + StringBuilder sb = new StringBuilder(122); + sb.append("Output files failure debug for request with args<") + .append(request.request.getArgumentsList()) + .append(">:\ngetOutputPathsList:\n") + .append(context.outputPaths) + .append("getOutputFilesList:\n") + .append(context.outputFiles) + .append("getOutputDirectoriesList:\n") + .append(context.outputDirectories); + + log.log(Level.SEVERE, sb.toString(), e); + + return new IOException("Response was OK but failed on postWorkCleanup", e); + } + + private void copyNontoolInputs(WorkerInputs workerInputs, Path workerExecRoot) + throws IOException { + for (Path opPath : workerInputs.allInputs.keySet()) { + if (!workerInputs.allToolInputs.contains(opPath)) { + Path execPath = workerInputs.relativizeInput(workerExecRoot, opPath); + workerInputs.copyInputFile(opPath, execPath); + } + } + } + + // Make outputs visible to the rest of Worker machinery + // see DockerExecutor::copyOutputsOutOfContainer + void moveOutputsToOperationRoot(WorkFilesContext context, Path workerExecRoot) + throws IOException { + Path opRoot = context.opRoot; + + for (String outputDir : context.outputDirectories) { + Path outputDirPath = Paths.get(outputDir); + Files.createDirectories(outputDirPath); + } + + for (String relOutput : context.outputFiles) { + Path execOutputPath = workerExecRoot.resolve(relOutput); + Path opOutputPath = 
opRoot.resolve(relOutput);
+
+      FileAccessUtils.moveFile(execOutputPath, opOutputPath);
+    }
+  }
+
+  private void cleanUpNontoolInputs(WorkerInputs workerInputs, Path workerExecRoot)
+      throws IOException {
+    for (Path opPath : workerInputs.allInputs.keySet()) {
+      if (!workerInputs.allToolInputs.contains(opPath)) {
+        workerInputs.deleteInputFileIfExists(workerExecRoot, opPath);
+      }
+    }
+  }
+
+  private void startTimeoutTimer(RequestCtx request) {
+    Duration timeout = request.timeout;
+    if (timeout != null) {
+      // Timer.schedule takes its delay in milliseconds
+      long timeoutMillis = timeout.getSeconds() * 1000L + timeout.getNanos() / 1000000L;
+      timeoutScheduler.schedule(new RequestTimeoutHandler(request), timeoutMillis);
+    }
+  }
+
+  private class RequestTimeoutHandler extends TimerTask {
+    private final RequestCtx request;
+
+    private RequestTimeoutHandler(RequestCtx request) {
+      this.request = request;
+    }
+
+    @Override
+    public void run() {
+      onTimeout(this.request, pendingReqs.get(this.request));
+    }
+  }
+
+  private void onTimeout(RequestCtx request, PersistentWorker worker) {
+    if (worker != null) {
+      log.severe("Persistent Worker timed out on request: " + request.request);
+      try {
+        this.workerPool.invalidateObject(worker.getKey(), worker);
+      } catch (Exception e) {
+        log.severe(
+            "Tried to invalidate worker for request:\n"
+                + request
+                + "\n\tbut got: "
+                + e
+                + "\n\nCalling worker.destroy() and moving on.");
+        worker.destroy();
+      }
+    }
+  }
+}
diff --git a/src/main/java/build/buildfarm/worker/persistent/RequestCtx.java b/src/main/java/build/buildfarm/worker/persistent/RequestCtx.java
new file mode 100644
index 0000000000..36f42b2f12
--- /dev/null
+++ b/src/main/java/build/buildfarm/worker/persistent/RequestCtx.java
@@ -0,0 +1,42 @@
+// Copyright 2023 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
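The timeout path above can be exercised in isolation. Since java.util.Timer.schedule takes its delay in milliseconds, a protobuf Duration has to be converted first; a minimal sketch with hypothetical names:

    import java.util.Timer;
    import java.util.TimerTask;

    class TimeoutSketch {
      private static final Timer timer = new Timer("timeout-sketch", /* isDaemon= */ true);

      static void scheduleTimeout(long seconds, int nanos, Runnable onTimeout) {
        // Convert the (seconds, nanos) pair to the milliseconds Timer expects.
        long delayMillis = seconds * 1000L + nanos / 1_000_000L;
        timer.schedule(
            new TimerTask() {
              @Override
              public void run() {
                onTimeout.run();
              }
            },
            delayMillis);
      }

      public static void main(String[] args) throws InterruptedException {
        scheduleTimeout(0, 500_000_000, () -> System.out.println("timed out"));
        Thread.sleep(1000); // give the daemon timer a chance to fire
      }
    }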
+ +package build.buildfarm.worker.persistent; + +import com.google.devtools.build.lib.worker.WorkerProtocol.WorkRequest; +import com.google.protobuf.Duration; +import persistent.common.CtxAround; + +public class RequestCtx implements CtxAround { + public final WorkRequest request; + + public final WorkFilesContext filesContext; + + public final WorkerInputs workerInputs; + + public final Duration timeout; + + public RequestCtx( + WorkRequest request, WorkFilesContext ctx, WorkerInputs workFiles, Duration timeout) { + this.request = request; + this.filesContext = ctx; + this.workerInputs = workFiles; + this.timeout = timeout; + } + + @Override + public WorkRequest get() { + return request; + } +} diff --git a/src/main/java/build/buildfarm/metrics/gcp/GcpMetricsPublisher.java b/src/main/java/build/buildfarm/worker/persistent/ResponseCtx.java similarity index 51% rename from src/main/java/build/buildfarm/metrics/gcp/GcpMetricsPublisher.java rename to src/main/java/build/buildfarm/worker/persistent/ResponseCtx.java index 40a6ddb1f8..0ff6edcdae 100644 --- a/src/main/java/build/buildfarm/metrics/gcp/GcpMetricsPublisher.java +++ b/src/main/java/build/buildfarm/worker/persistent/ResponseCtx.java @@ -1,4 +1,4 @@ -// Copyright 2020 The Bazel Authors. All rights reserved. +// Copyright 2023 The Bazel Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,20 +12,23 @@ // See the License for the specific language governing permissions and // limitations under the License. -package build.buildfarm.metrics.gcp; +package build.buildfarm.worker.persistent; -import build.buildfarm.common.config.BuildfarmConfigs; -import build.buildfarm.metrics.AbstractMetricsPublisher; +import com.google.devtools.build.lib.worker.WorkerProtocol.WorkResponse; +import persistent.common.CtxAround; -public class GcpMetricsPublisher extends AbstractMetricsPublisher { - private static BuildfarmConfigs configs = BuildfarmConfigs.getInstance(); +public class ResponseCtx implements CtxAround { + public final WorkResponse response; - public GcpMetricsPublisher() { - super(configs.getServer().getClusterId()); + public final String errorString; + + public ResponseCtx(WorkResponse response, String errorString) { + this.response = response; + this.errorString = errorString; } @Override - public void publishMetric(String metricName, Object metricValue) { - throw new UnsupportedOperationException(); + public WorkResponse get() { + return response; } } diff --git a/src/main/java/build/buildfarm/worker/persistent/WorkFilesContext.java b/src/main/java/build/buildfarm/worker/persistent/WorkFilesContext.java new file mode 100644 index 0000000000..4aefc7f290 --- /dev/null +++ b/src/main/java/build/buildfarm/worker/persistent/WorkFilesContext.java @@ -0,0 +1,85 @@ +// Copyright 2023 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
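RequestCtx and ResponseCtx both implement CtxAround, which reduces to: carry side context along with the proto that the worker protocol actually consumes. A generic sketch of the same shape, illustrative only; CtxAround itself lives in the persistentworkers library:

    // A value of type T plus whatever side context the coordinator must thread
    // through; get() exposes only the part the underlying protocol consumes.
    interface CtxAroundSketch<T> {
      T get();
    }

    final class WithContext<T, C> implements CtxAroundSketch<T> {
      final T value;
      final C context;

      WithContext(T value, C context) {
        this.value = value;
        this.context = context;
      }

      @Override
      public T get() {
        return value;
      }
    }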
+ +package build.buildfarm.worker.persistent; + +import build.bazel.remote.execution.v2.Command; +import build.buildfarm.v1test.Tree; +import build.buildfarm.worker.util.InputsIndexer; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.devtools.build.lib.worker.WorkerProtocol.Input; +import java.nio.file.Path; + +/** POJO/data class grouping all the input/output file requirements for persistent workers */ +public class WorkFilesContext { + public final Path opRoot; + + public final Tree execTree; + + public final ImmutableList outputPaths; + + public final ImmutableList outputFiles; + + public final ImmutableList outputDirectories; + + private final InputsIndexer inputsIndexer; + + private ImmutableMap pathInputs = null; + + private ImmutableMap toolInputs = null; + + public WorkFilesContext( + Path opRoot, + Tree execTree, + ImmutableList outputPaths, + ImmutableList outputFiles, + ImmutableList outputDirectories) { + this.opRoot = opRoot.toAbsolutePath(); + this.execTree = execTree; + this.outputPaths = outputPaths; + this.outputFiles = outputFiles; + this.outputDirectories = outputDirectories; + + this.inputsIndexer = new InputsIndexer(execTree, this.opRoot); + } + + public static WorkFilesContext fromContext(Path opRoot, Tree inputsTree, Command opCommand) { + return new WorkFilesContext( + opRoot, + inputsTree, + ImmutableList.copyOf(opCommand.getOutputPathsList()), + ImmutableList.copyOf(opCommand.getOutputFilesList()), + ImmutableList.copyOf(opCommand.getOutputDirectoriesList())); + } + + // Paths are absolute paths from the opRoot; same as the Input.getPath(); + public ImmutableMap getPathInputs() { + synchronized (this) { + if (pathInputs == null) { + pathInputs = inputsIndexer.getAllInputs(); + } + } + return pathInputs; + } + + public ImmutableMap getToolInputs() { + synchronized (this) { + if (toolInputs == null) { + toolInputs = inputsIndexer.getToolInputs(); + } + } + return toolInputs; + } +} diff --git a/src/main/java/build/buildfarm/worker/persistent/WorkerInputs.java b/src/main/java/build/buildfarm/worker/persistent/WorkerInputs.java new file mode 100644 index 0000000000..de731b9d83 --- /dev/null +++ b/src/main/java/build/buildfarm/worker/persistent/WorkerInputs.java @@ -0,0 +1,117 @@ +// Copyright 2023 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
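getPathInputs() and getToolInputs() above use synchronized lazy initialization: the exec tree is indexed at most once, then the cached map is served. Distilled into a generic sketch, with a supplier standing in for InputsIndexer:

    import java.util.Map;
    import java.util.function.Supplier;

    class LazyIndexSketch<K, V> {
      private final Supplier<Map<K, V>> indexer;
      private Map<K, V> cached = null;

      LazyIndexSketch(Supplier<Map<K, V>> indexer) {
        this.indexer = indexer;
      }

      synchronized Map<K, V> get() {
        if (cached == null) {
          cached = indexer.get(); // the expensive tree walk happens at most once
        }
        return cached;
      }
    }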
+ +package build.buildfarm.worker.persistent; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.devtools.build.lib.worker.WorkerProtocol.Input; +import com.google.protobuf.ByteString; +import java.io.IOException; +import java.nio.file.Path; +import java.util.List; +import lombok.extern.java.Log; + +@Log +public class WorkerInputs { + public final Path opRoot; + // Some tool inputs are not under opRoot + public final ImmutableSet absToolInputs; + // The Paths in these collections should all be absolute and under opRoot + public final ImmutableSet opToolInputs; + public final ImmutableMap allInputs; + + public final ImmutableSet allToolInputs; + + public WorkerInputs( + Path opRoot, + ImmutableSet absToolInputs, + ImmutableSet opToolInputs, + ImmutableMap allInputs) { + this.opRoot = opRoot; + this.absToolInputs = absToolInputs; + this.opToolInputs = opToolInputs; + this.allInputs = allInputs; + + this.allToolInputs = + ImmutableSet.builder().addAll(absToolInputs).addAll(opToolInputs).build(); + + // Currently not a concern but could be in the future + for (Path tool : opToolInputs) { + if (!allInputs.containsKey(tool)) { + String msg = "Tool not found in inputs: " + tool; + log.severe(msg); + throw new IllegalArgumentException(msg); + } + } + } + + public boolean containsTool(Path tool) { + return allToolInputs.contains(opRoot.resolve(tool)); + } + + public Path relativizeInput(Path newRoot, Path input) { + return newRoot.resolve(opRoot.relativize(input)); + } + + public void copyInputFile(Path from, Path to) throws IOException { + checkFileIsInput("copyInputFile()", from); + FileAccessUtils.copyFile(from, to); + } + + public void deleteInputFileIfExists(Path workerExecRoot, Path opPathInput) throws IOException { + checkFileIsInput("deleteInputFile()", opPathInput); + Path execPathInput = relativizeInput(workerExecRoot, opPathInput); + FileAccessUtils.deleteFileIfExists(execPathInput); + } + + private void checkFileIsInput(String operation, Path file) { + if (!allInputs.containsKey(file)) { + throw new IllegalArgumentException(operation + " called on non-input file: " + file); + } + } + + public ByteString digestFor(Path inputPath) { + Input input = allInputs.get(inputPath); + if (input == null) { + throw new IllegalArgumentException("digestFor() called on non-input file: " + inputPath); + } + return input.getDigest(); + } + + public static WorkerInputs from(WorkFilesContext workFilesContext, List reqArgs) { + ImmutableMap pathInputs = workFilesContext.getPathInputs(); + + ImmutableSet toolsAbsPaths = workFilesContext.getToolInputs().keySet(); + + ImmutableSet toolInputs = + ImmutableSet.copyOf( + toolsAbsPaths.stream().filter(p -> p.startsWith(workFilesContext.opRoot)).iterator()); + ImmutableSet absToolInputs = + ImmutableSet.copyOf(toolsAbsPaths.stream().filter(p -> !toolInputs.contains(p)).iterator()); + + String inputsDebugMsg = + "ParsedWorkFiles:" + + "\nallInputs: " + + pathInputs.keySet() + + "\ntoolInputs: " + + toolInputs + + "\nabsToolInputs: " + + absToolInputs; + + log.fine(inputsDebugMsg); + + return new WorkerInputs(workFilesContext.opRoot, absToolInputs, toolInputs, pathInputs); + } +} diff --git a/src/main/java/build/buildfarm/worker/resources/BUILD b/src/main/java/build/buildfarm/worker/resources/BUILD index ac2d69179b..36a4dc3d34 100644 --- a/src/main/java/build/buildfarm/worker/resources/BUILD +++ b/src/main/java/build/buildfarm/worker/resources/BUILD @@ -8,6 +8,7 @@ java_library( 
"//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", "@maven//:com_google_guava_guava", "@maven//:com_googlecode_json_simple_json_simple", + "@maven//:io_prometheus_simpleclient", "@maven//:org_apache_commons_commons_lang3", ], ) diff --git a/src/main/java/build/buildfarm/worker/resources/ExecutionPropertiesParser.java b/src/main/java/build/buildfarm/worker/resources/ExecutionPropertiesParser.java index 183c48da18..2c219589fb 100644 --- a/src/main/java/build/buildfarm/worker/resources/ExecutionPropertiesParser.java +++ b/src/main/java/build/buildfarm/worker/resources/ExecutionPropertiesParser.java @@ -67,6 +67,12 @@ public static ResourceLimits Parse(Command command) { parser.put( ExecutionProperties.DEBUG_TESTS_ONLY, ExecutionPropertiesParser::storeDebugTestsOnly); parser.put(ExecutionProperties.DEBUG_TARGET, ExecutionPropertiesParser::storeDebugTarget); + parser.put( + ExecutionProperties.PERSISTENT_WORKER_KEY, + ExecutionPropertiesParser::storePersistentWorkerKey); + parser.put( + ExecutionProperties.PERSISTENT_WORKER_COMMAND, + ExecutionPropertiesParser::storePersistentWorkerCommand); ResourceLimits limits = new ResourceLimits(); command @@ -327,6 +333,32 @@ private static void storeDebugTarget(ResourceLimits limits, Property property) { describeChange(limits.description, "debug target", property.getValue(), property); } + /** + * @brief Stores persistentWorkerKey + * @details Parses and stores a String. + * @param limits Current limits to apply changes to. + * @param property The property to store. + */ + private static void storePersistentWorkerKey(ResourceLimits limits, Property property) { + limits.persistentWorkerKey = property.getValue(); + ArrayList xs = new ArrayList<>(); + xs.add("Hash of tool inputs for remote persistent workers"); + describeChange(xs, "persistentWorkerKey(hash of tool inputs)", property.getValue(), property); + } + + /** + * @brief Stores persistentWorkerCommand + * @details Parses and stores a String. + * @param limits Current limits to apply changes to. + * @param property The property to store. + */ + private static void storePersistentWorkerCommand(ResourceLimits limits, Property property) { + limits.persistentWorkerCommand = property.getValue(); + ArrayList xs = new ArrayList<>(); + xs.add("persistentWorkerCommand"); + describeChange(xs, "persistentWorkerCommand", property.getValue(), property); + } + /** * @brief Store the description of the change made. * @details Adds a debug message on the resource change. diff --git a/src/main/java/build/buildfarm/worker/resources/LocalResourceSet.java b/src/main/java/build/buildfarm/worker/resources/LocalResourceSet.java new file mode 100644 index 0000000000..97d64fe9b1 --- /dev/null +++ b/src/main/java/build/buildfarm/worker/resources/LocalResourceSet.java @@ -0,0 +1,37 @@ +// Copyright 2023 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package build.buildfarm.worker.resources;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.Semaphore;
+
+/**
+ * @class Local Resource Set
+ * @brief A fixed amount of a specific resource.
+ * @details We define limited resources as counting semaphores whose configuration contains a name
+ *     and a count representing a physical or logical group of units obtained by executors as a
+ *     precondition to fulfill a long-running operation. These units are released upon the
+ *     operation's completion. The resource is requested by the action's platform properties. These
+ *     resources are specific to the individual worker.
+ */
+public class LocalResourceSet {
+  /**
+   * @field resources
+   * @brief A set containing resource semaphores organized by name.
+   * @details Key is the resource name; value is the semaphore tracking the units still available.
+   */
+  public Map<String, Semaphore> resources = new HashMap<>();
+}
diff --git a/src/main/java/build/buildfarm/worker/resources/LocalResourceSetMetrics.java b/src/main/java/build/buildfarm/worker/resources/LocalResourceSetMetrics.java
new file mode 100644
index 0000000000..d219d56612
--- /dev/null
+++ b/src/main/java/build/buildfarm/worker/resources/LocalResourceSetMetrics.java
@@ -0,0 +1,47 @@
+// Copyright 2020 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package build.buildfarm.worker.resources;
+
+import io.prometheus.client.Gauge;
+
+/**
+ * @class LocalResourceSetMetrics
+ * @brief Tracks metrics related to a worker's limited local resources.
+ * @details Answers how many resources exist, how many are claimed, and by how many requesters.
+ */
+public class LocalResourceSetMetrics {
+  public static final Gauge resourceUsageMetric =
+      Gauge.build()
+          .name("local_resource_usage")
+          .labelNames("resource_name")
+          .help("The number of claims for each resource currently being used for execution")
+          .register();
+
+  public static final Gauge resourceTotalMetric =
+      Gauge.build()
+          .name("local_resource_total")
+          .labelNames("resource_name")
+          .help("The total number of claims that exist for a particular resource")
+          .register();
+
+  public static final Gauge requestersMetric =
+      Gauge.build()
+          .name("local_resource_requesters")
+          .labelNames("resource_name")
+          .help(
+              "Tracks how many actions have requested local resources. This can help determine if"
+                  + " resources are being hogged by some actions.")
+          .register();
+}
diff --git a/src/main/java/build/buildfarm/worker/resources/LocalResourceSetUtils.java b/src/main/java/build/buildfarm/worker/resources/LocalResourceSetUtils.java
new file mode 100644
index 0000000000..e8a1e56131
--- /dev/null
+++ b/src/main/java/build/buildfarm/worker/resources/LocalResourceSetUtils.java
@@ -0,0 +1,120 @@
+// Copyright 2020 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package build.buildfarm.worker.resources;
+
+import build.bazel.remote.execution.v2.Platform;
+import build.buildfarm.common.config.LimitedResource;
+import java.util.AbstractMap;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Semaphore;
+import org.apache.commons.lang3.StringUtils;
+
+/**
+ * @class LocalResourceSetUtils
+ * @brief Utilities for working with the worker's set of local limited resources.
+ * @details The methods help with allocation / de-allocation of claims, as well as metrics printing.
+ */
+public class LocalResourceSetUtils {
+  private static final LocalResourceSetMetrics metrics = new LocalResourceSetMetrics();
+
+  public static LocalResourceSet create(List<LimitedResource> resources) {
+    LocalResourceSet resourceSet = new LocalResourceSet();
+    for (LimitedResource resource : resources) {
+      resourceSet.resources.put(resource.getName(), new Semaphore(resource.getAmount()));
+      metrics.resourceTotalMetric.labels(resource.getName()).set(resource.getAmount());
+    }
+    return resourceSet;
+  }
+
+  public static boolean claimResources(Platform platform, LocalResourceSet resourceSet) {
+    List<Map.Entry<String, Integer>> claimed = new ArrayList<>();
+
+    boolean allClaimed = true;
+    for (Platform.Property property : platform.getPropertiesList()) {
+      // Skip properties that are not requesting a limited resource.
+      String resourceName = getResourceName(property);
+      Semaphore resource = resourceSet.resources.get(resourceName);
+      if (resource == null) {
+        continue;
+      }
+
+      // Attempt to claim. If claiming fails, we must return all other claims.
+      int requestAmount = getResourceRequestAmount(property);
+      boolean wasAcquired = semaphoreAquire(resource, resourceName, requestAmount);
+      if (wasAcquired) {
+        claimed.add(new AbstractMap.SimpleEntry<>(resourceName, requestAmount));
+      } else {
+        allClaimed = false;
+        break;
+      }
+    }
+
+    // Release the claims made so far if not every resource could be acquired
+    // (all-or-nothing semantics; see the sketch below).
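The claim logic above is all-or-nothing: each requested amount is tryAcquire'd in turn, and on the first failure every prior claim is released. In miniature, with plain semaphores and the metrics omitted:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.Semaphore;

    class ClaimSketch {
      // Returns true only if every (resource, amount) request could be claimed;
      // otherwise releases the partial claims and returns false.
      static boolean claimAll(Map<String, Semaphore> resources, Map<String, Integer> requests) {
        List<Map.Entry<String, Integer>> claimed = new ArrayList<>();
        for (Map.Entry<String, Integer> req : requests.entrySet()) {
          Semaphore s = resources.get(req.getKey());
          if (s == null) {
            continue; // not a limited resource on this worker
          }
          if (s.tryAcquire(req.getValue())) {
            claimed.add(req);
          } else {
            for (Map.Entry<String, Integer> c : claimed) {
              resources.get(c.getKey()).release(c.getValue());
            }
            return false;
          }
        }
        return true;
      }
    }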
+    if (!allClaimed) {
+      for (Map.Entry<String, Integer> claim : claimed) {
+        semaphoreRelease(
+            resourceSet.resources.get(claim.getKey()), claim.getKey(), claim.getValue());
+      }
+    }
+
+    return allClaimed;
+  }
+
+  public static void releaseClaims(Platform platform, LocalResourceSet resourceSet) {
+    for (Platform.Property property : platform.getPropertiesList()) {
+      String resourceName = getResourceName(property);
+      Semaphore resource = resourceSet.resources.get(resourceName);
+      if (resource == null) {
+        continue;
+      }
+      int requestAmount = getResourceRequestAmount(property);
+      semaphoreRelease(resource, resourceName, requestAmount);
+    }
+  }
+
+  private static boolean semaphoreAquire(Semaphore resource, String resourceName, int amount) {
+    boolean wasAcquired = resource.tryAcquire(amount);
+    if (wasAcquired) {
+      metrics.resourceUsageMetric.labels(resourceName).inc(amount);
+    }
+    metrics.requestersMetric.labels(resourceName).inc();
+    return wasAcquired;
+  }
+
+  private static void semaphoreRelease(Semaphore resource, String resourceName, int amount) {
+    resource.release(amount);
+    metrics.resourceUsageMetric.labels(resourceName).dec(amount);
+    metrics.requestersMetric.labels(resourceName).dec();
+  }
+
+  private static int getResourceRequestAmount(Platform.Property property) {
+    // We support resource values that are not numbers and interpret them as a request for 1
+    // resource. For example "gpu:RTX-4090" is equivalent to "resource:gpu:1".
+    try {
+      return Integer.parseInt(property.getValue());
+    } catch (NumberFormatException e) {
+      return 1;
+    }
+  }
+
+  private static String getResourceName(Platform.Property property) {
+    // We match keys whether they are prefixed "resource:" or not.
+    // "resource:gpu:1" requests the gpu resource in the same way that "gpu:1" does.
+    // The prefix originates from bazel's syntax for the --extra_resources flag.
+    return StringUtils.removeStart(property.getName(), "resource:");
+  }
+}
diff --git a/src/main/java/build/buildfarm/worker/resources/ResourceDecider.java b/src/main/java/build/buildfarm/worker/resources/ResourceDecider.java
index a5229a3377..5523f1aa79 100644
--- a/src/main/java/build/buildfarm/worker/resources/ResourceDecider.java
+++ b/src/main/java/build/buildfarm/worker/resources/ResourceDecider.java
@@ -200,11 +200,19 @@ private static void adjustLimits(
   }
 
   private static void decideSandboxUsage(ResourceLimits limits, SandboxSettings sandbox) {
-    // configured on
-    if (sandbox.isAlwaysUse()) {
+    // Decide which sandbox limitations are enabled by default according to the deployment's
+    // configuration.
diff --git a/src/main/java/build/buildfarm/worker/resources/ResourceDecider.java b/src/main/java/build/buildfarm/worker/resources/ResourceDecider.java
index a5229a3377..5523f1aa79 100644
--- a/src/main/java/build/buildfarm/worker/resources/ResourceDecider.java
+++ b/src/main/java/build/buildfarm/worker/resources/ResourceDecider.java
@@ -200,11 +200,19 @@ private static void adjustLimits(
   }

   private static void decideSandboxUsage(ResourceLimits limits, SandboxSettings sandbox) {
-    // configured on
-    if (sandbox.isAlwaysUse()) {
+    // Decide which sandbox limitations are enabled by default according to the deployment's
+    // configuration.
+    if (sandbox.isAlwaysUseSandbox()) {
       limits.useLinuxSandbox = true;
-      limits.description.add("enabled");
-      return;
+      limits.description.add("enabled sandbox by default");
+    }
+    if (sandbox.isAlwaysUseCgroups()) {
+      limits.cgroups = true;
+      limits.description.add("enabled cgroups by default");
+    }
+    if (sandbox.isAlwaysUseTmpFs()) {
+      limits.tmpFs = true;
+      limits.description.add("enabled tmpfs by default");
     }

     // selected based on other features
diff --git a/src/main/java/build/buildfarm/worker/resources/ResourceLimits.java b/src/main/java/build/buildfarm/worker/resources/ResourceLimits.java
index 4e5601625c..e40ce738a2 100644
--- a/src/main/java/build/buildfarm/worker/resources/ResourceLimits.java
+++ b/src/main/java/build/buildfarm/worker/resources/ResourceLimits.java
@@ -75,6 +75,13 @@ public class ResourceLimits {
    */
   public ContainerSettings containerSettings = new ContainerSettings();

+  /**
+   * @field cgroups
+   * @brief Whether to use cgroups for resource limitation.
+   * @details Decides whether to use cgroups for restricting cores, mem, etc.
+   */
+  public boolean cgroups = true;
+
   /**
    * @field cpu
    * @brief Resource limitations on CPUs.
@@ -156,4 +163,18 @@ public class ResourceLimits {
    * @details This can be used to debug execution behavior.
    */
   public final ArrayList<String> description = new ArrayList<>();
+
+  /**
+   * @field persistentWorkerKey
+   * @brief Hash of tool inputs for remote persistent workers.
+   * @details See https://github.com/bazelbuild/bazel/issues/10091
+   */
+  public String persistentWorkerKey = "";
+
+  /**
+   * @field persistentWorkerCommand
+   * @brief Command string to start the persistent worker.
+   * @details See https://github.com/bazelbuild/bazel/issues/10091
+   */
+  public String persistentWorkerCommand = "";
 }
diff --git a/src/main/java/build/buildfarm/worker/shard/BUILD b/src/main/java/build/buildfarm/worker/shard/BUILD
index 3df1ab7e77..0f69b6fdff 100644
--- a/src/main/java/build/buildfarm/worker/shard/BUILD
+++ b/src/main/java/build/buildfarm/worker/shard/BUILD
@@ -4,11 +4,10 @@ java_library(
     plugins = ["//src/main/java/build/buildfarm/common:lombok"],
     visibility = ["//visibility:public"],
    deps = [
-        "//src/main/java/build/buildfarm/admin",
-        "//src/main/java/build/buildfarm/admin/aws",
         "//src/main/java/build/buildfarm/backplane",
         "//src/main/java/build/buildfarm/cas",
         "//src/main/java/build/buildfarm/common",
+        "//src/main/java/build/buildfarm/common:BuildfarmExecutors",
         "//src/main/java/build/buildfarm/common/config",
         "//src/main/java/build/buildfarm/common/grpc",
         "//src/main/java/build/buildfarm/common/services",
@@ -22,7 +21,8 @@ java_library(
         "//src/main/java/build/buildfarm/worker/resources",
         "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_grpc",
         "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto",
-        "@googleapis//:google_rpc_error_details_java_proto",
+        "@com_google_googleapis//google/rpc:rpc_java_proto",
+        "@io_grpc_grpc_proto//:health_java_proto",
         "@maven//:com_github_ben_manes_caffeine_caffeine",
         "@maven//:com_github_pcj_google_options",
         "@maven//:com_google_code_findbugs_jsr305",
@@ -32,17 +32,11 @@ java_library(
         "@maven//:io_grpc_grpc_api",
         "@maven//:io_grpc_grpc_context",
         "@maven//:io_grpc_grpc_core",
-        "@maven//:io_grpc_grpc_netty",
         "@maven//:io_grpc_grpc_protobuf",
         "@maven//:io_grpc_grpc_services",
         "@maven//:io_grpc_grpc_stub",
         "@maven//:io_prometheus_simpleclient",
         "@maven//:javax_annotation_javax_annotation_api",
         "@maven//:org_projectlombok_lombok",
-        "@maven//:org_springframework_boot_spring_boot",
"@maven//:org_springframework_boot_spring_boot_autoconfigure", - "@maven//:org_springframework_spring_beans", - "@maven//:org_springframework_spring_context", - "@maven//:org_springframework_spring_core", ], ) diff --git a/src/main/java/build/buildfarm/worker/shard/CFCExecFileSystem.java b/src/main/java/build/buildfarm/worker/shard/CFCExecFileSystem.java index 08ef085277..80fe18f0f5 100644 --- a/src/main/java/build/buildfarm/worker/shard/CFCExecFileSystem.java +++ b/src/main/java/build/buildfarm/worker/shard/CFCExecFileSystem.java @@ -19,12 +19,13 @@ import static com.google.common.base.Preconditions.checkNotNull; import static com.google.common.base.Preconditions.checkState; import static com.google.common.collect.Iterables.concat; -import static com.google.common.collect.Iterables.filter; import static com.google.common.util.concurrent.Futures.allAsList; +import static com.google.common.util.concurrent.Futures.catchingAsync; import static com.google.common.util.concurrent.Futures.immediateFailedFuture; import static com.google.common.util.concurrent.Futures.immediateFuture; import static com.google.common.util.concurrent.Futures.transform; import static com.google.common.util.concurrent.Futures.transformAsync; +import static com.google.common.util.concurrent.MoreExecutors.directExecutor; import static com.google.common.util.concurrent.MoreExecutors.listeningDecorator; import static com.google.common.util.concurrent.MoreExecutors.shutdownAndAwaitTermination; import static java.util.concurrent.TimeUnit.MINUTES; @@ -35,30 +36,38 @@ import build.bazel.remote.execution.v2.Digest; import build.bazel.remote.execution.v2.Directory; import build.bazel.remote.execution.v2.DirectoryNode; -import build.bazel.remote.execution.v2.FileNode; import build.bazel.remote.execution.v2.SymlinkNode; import build.buildfarm.cas.ContentAddressableStorage; import build.buildfarm.cas.cfc.CASFileCache; import build.buildfarm.common.BuildfarmExecutors; +import build.buildfarm.common.DigestUtil; import build.buildfarm.common.io.Directories; import build.buildfarm.common.io.Dirent; +import build.buildfarm.worker.ExecDirException; +import build.buildfarm.worker.ExecDirException.ViolationException; import build.buildfarm.worker.OutputDirectory; +import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Iterables; import com.google.common.util.concurrent.ListenableFuture; import java.io.IOException; import java.io.InputStream; +import java.nio.file.FileStore; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.attribute.UserPrincipal; -import java.util.Collections; +import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.Stack; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.function.Consumer; import java.util.logging.Level; +import java.util.regex.Pattern; import javax.annotation.Nullable; import lombok.extern.java.Log; @@ -71,8 +80,11 @@ class CFCExecFileSystem implements ExecFileSystem { // perform first-available non-output symlinking and retain directories in cache private final boolean linkInputDirectories; - // override the symlinking above for a set of matching paths - private final Iterable realInputDirectories; + // indicate symlinking above for a set of 
+  private final Iterable<Pattern> linkedInputDirectories;
+
+  // permit symlinks to point to absolute paths in inputs
+  private final boolean allowSymlinkTargetAbsolute;

   private final Map<Path, Iterable<String>> rootKeys = new ConcurrentHashMap<>();
   private final Map<Path, Iterable<String>> rootInputFiles = new ConcurrentHashMap<>();
@@ -80,20 +92,25 @@ class CFCExecFileSystem implements ExecFileSystem {
   private final ExecutorService fetchService = BuildfarmExecutors.getFetchServicePool();
   private final ExecutorService removeDirectoryService;
   private final ExecutorService accessRecorder;
+  private FileStore fileStore; // initialized with start

   CFCExecFileSystem(
       Path root,
       CASFileCache fileCache,
       @Nullable UserPrincipal owner,
       boolean linkInputDirectories,
-      Iterable<String> realInputDirectories,
+      Iterable<String> linkedInputDirectories,
+      boolean allowSymlinkTargetAbsolute,
       ExecutorService removeDirectoryService,
       ExecutorService accessRecorder) {
     this.root = root;
     this.fileCache = fileCache;
     this.owner = owner;
     this.linkInputDirectories = linkInputDirectories;
-    this.realInputDirectories = realInputDirectories;
+    this.linkedInputDirectories =
+        Iterables.transform(
+            linkedInputDirectories, realInputDirectory -> Pattern.compile(realInputDirectory));
+    this.allowSymlinkTargetAbsolute = allowSymlinkTargetAbsolute;
     this.removeDirectoryService = removeDirectoryService;
     this.accessRecorder = accessRecorder;
   }
@@ -102,9 +119,10 @@ class CFCExecFileSystem implements ExecFileSystem {
   @Override
   public void start(Consumer<List<Digest>> onDigests, boolean skipLoad)
       throws IOException, InterruptedException {
+    fileStore = Files.getFileStore(root);
     List<Dirent> dirents = null;
     try {
-      dirents = readdir(root, /* followSymlinks= */ false, Files.getFileStore(root));
+      dirents = readdir(root, /* followSymlinks= */ false, fileStore);
     } catch (IOException e) {
       log.log(Level.SEVERE, "error reading directory " + root.toString(), e);
     }
@@ -116,7 +134,8 @@ public void start(Consumer<List<Digest>> onDigests, boolean skipLoad)
       String name = dirent.getName();
       Path child = root.resolve(name);
       if (!child.equals(fileCache.getRoot())) {
-        removeDirectoryFutures.add(Directories.remove(root.resolve(name), removeDirectoryService));
+        removeDirectoryFutures.add(
+            Directories.remove(root.resolve(name), fileStore, removeDirectoryService));
       }
     }
@@ -135,7 +154,8 @@ public void start(Consumer<List<Digest>> onDigests, boolean skipLoad)
   }

   @Override
-  public void stop() {
+  public void stop() throws InterruptedException {
+    fileCache.stop();
     if (!shutdownAndAwaitTermination(fetchService, 1, MINUTES)) {
       log.log(Level.SEVERE, "could not terminate fetchService");
     }
@@ -166,7 +186,7 @@ public InputStream newInput(Compressor.Value compressor, Digest digest, long offset)
   private ListenableFuture<Void> putSymlink(Path path, SymlinkNode symlinkNode) {
     Path symlinkPath = path.resolve(symlinkNode.getName());
     Path relativeTargetPath = path.getFileSystem().getPath(symlinkNode.getTarget());
-    checkState(!relativeTargetPath.isAbsolute());
+    checkState(allowSymlinkTargetAbsolute || !relativeTargetPath.isAbsolute());
     return listeningDecorator(fetchService)
         .submit(
             () -> {
@@ -177,33 +197,29 @@ private ListenableFuture<Void> putSymlink(Path path, SymlinkNode symlinkNode) {

   @SuppressWarnings("ConstantConditions")
   private ListenableFuture<Void> put(
-      Path path, FileNode fileNode, ImmutableList.Builder<String> inputFiles) {
-    Path filePath = path.resolve(fileNode.getName());
-    Digest digest = fileNode.getDigest();
+      Digest digest, Path path, boolean isExecutable, Consumer<String> onKey) {
     if (digest.getSizeBytes() == 0) {
       return listeningDecorator(fetchService)
           .submit(
               () -> {
-                Files.createFile(filePath);
+                Files.createFile(path);
                 // ignore executable
                 return null;
               });
     }

-    String key = fileCache.getKey(digest, fileNode.getIsExecutable());
+    String key = fileCache.getKey(digest, isExecutable);
     return transformAsync(
-        fileCache.put(digest, fileNode.getIsExecutable(), fetchService),
+        fileCache.put(digest, isExecutable, fetchService),
         (fileCachePath) -> {
           checkNotNull(key);
           // we saw null entries in the built immutable list without synchronization
-          synchronized (inputFiles) {
-            inputFiles.add(key);
-          }
-          if (fileNode.getDigest().getSizeBytes() != 0) {
+          onKey.accept(key);
+          if (digest.getSizeBytes() != 0) {
             try {
               // Coordinated with the CAS - consider adding an API for safe path
               // access
               synchronized (fileCache) {
-                Files.createLink(filePath, fileCachePath);
+                Files.createLink(path, fileCachePath);
               }
             } catch (IOException e) {
               return immediateFailedFuture(e);
@@ -214,19 +230,44 @@ private ListenableFuture<Void> put(
         fetchService);
   }

+  private ListenableFuture<Void> catchingPut(
+      Digest digest, Path root, Path path, boolean isExecutable, Consumer<String> onKey) {
+    return catchingAsync(
+        put(digest, path, isExecutable, onKey),
+        Throwable.class, // required per docs
+        t -> {
+          if (t instanceof IOException) {
+            return immediateFailedFuture(
+                new ViolationException(
+                    digest, root.relativize(path), isExecutable, (IOException) t));
+          }
+          return immediateFailedFuture(t);
+        },
+        directExecutor());
+  }
+
   private Iterable<ListenableFuture<Void>> fetchInputs(
+      Path root,
       Path path,
       Digest directoryDigest,
       Map<Digest, Directory> directoriesIndex,
       OutputDirectory outputDirectory,
-      ImmutableList.Builder<String> inputFiles,
+      Set<Path> linkedInputDirectories,
+      Consumer<String> onKey,
       ImmutableList.Builder<Digest> inputDirectories)
       throws IOException {
     Directory directory = directoriesIndex.get(directoryDigest);
     checkNotNull(directory);
     Iterable<ListenableFuture<Void>> downloads =
         directory.getFilesList().stream()
-            .map(fileNode -> put(path, fileNode, inputFiles))
+            .map(
+                fileNode ->
+                    catchingPut(
+                        fileNode.getDigest(),
+                        root,
+                        path.resolve(fileNode.getName()),
+                        fileNode.getIsExecutable(),
+                        onKey))
             .collect(ImmutableList.toImmutableList());
     downloads =
         concat(
@@ -235,48 +276,47 @@ private Iterable<ListenableFuture<Void>> fetchInputs(
                 .map(symlinkNode -> putSymlink(path, symlinkNode))
                 .collect(ImmutableList.toImmutableList()));

+    ImmutableList.Builder<ListenableFuture<Void>> linkedDirectories = ImmutableList.builder();
     for (DirectoryNode directoryNode : directory.getDirectoriesList()) {
       Digest digest = directoryNode.getDigest();
       String name = directoryNode.getName();
       OutputDirectory childOutputDirectory =
           outputDirectory != null ? outputDirectory.getChild(name) : null;
       Path dirPath = path.resolve(name);
-      if (childOutputDirectory != null || !linkInputDirectories) {
+      if (childOutputDirectory != null
+          || !linkInputDirectories
+          || !linkedInputDirectories.contains(dirPath)) {
         Files.createDirectories(dirPath);
         downloads =
             concat(
                 downloads,
                 fetchInputs(
+                    root,
                     dirPath,
                     digest,
                     directoriesIndex,
                     childOutputDirectory,
-                    inputFiles,
+                    linkedInputDirectories,
+                    onKey,
                     inputDirectories));
       } else {
-        downloads =
-            concat(
-                downloads,
-                ImmutableList.of(
-                    transform(
-                        linkDirectory(dirPath, digest, directoriesIndex),
-                        (result) -> {
-                          // note: this could non-trivial make sync due to
-                          // the way decrementReferences is implemented.
-                          // we saw null entries in the built immutable list
-                          // without synchronization
-                          synchronized (inputDirectories) {
-                            inputDirectories.add(digest);
-                          }
-                          return null;
-                        },
-                        fetchService)));
+        linkedDirectories.add(
+            transform(
+                linkDirectory(dirPath, digest, directoriesIndex),
+                (result) -> {
+                  // we saw null entries in the built immutable list without synchronization
+                  synchronized (inputDirectories) {
+                    inputDirectories.add(digest);
+                  }
+                  return null;
+                },
+                fetchService));
       }
       if (Thread.currentThread().isInterrupted()) {
         break;
       }
     }
-    return downloads;
+    return concat(downloads, linkedDirectories.build());
   }

   @SuppressWarnings("ConstantConditions")
@@ -284,70 +324,100 @@ private ListenableFuture<Void> linkDirectory(
       Path execPath, Digest digest, Map<Digest, Directory> directoriesIndex) {
     return transformAsync(
         fileCache.putDirectory(digest, directoriesIndex, fetchService),
-        (cachePath) -> {
-          Files.createSymbolicLink(execPath, cachePath);
+        pathResult -> {
+          Path path = pathResult.getPath();
+          if (pathResult.getMissed()) {
+            log.fine(
+                String.format(
+                    "putDirectory(%s, %s) created", execPath, DigestUtil.toString(digest)));
+          }
+          Files.createSymbolicLink(execPath, path);
           return immediateFuture(null);
         },
         fetchService);
   }

-  private static class ExecDirException extends IOException {
-    private final Path path;
-    private final List<Throwable> exceptions;
-
-    ExecDirException(Path path, List<Throwable> exceptions) {
-      // When printing the exception, show the captured sub-exceptions.
-      super(getErrorMessage(path, exceptions));
-      this.path = path;
-      this.exceptions = exceptions;
-      for (Throwable exception : exceptions) {
-        addSuppressed(exception);
-      }
-    }
-
-    Path getPath() {
-      return path;
-    }
-
-    List<Throwable> getExceptions() {
-      return exceptions;
-    }
-  }
-
-  private static String getErrorMessage(Path path, List<Throwable> exceptions) {
-    return String.format("%s: %d %s: %s", path, exceptions.size(), "exceptions", exceptions);
-  }
-
   private static void checkExecErrors(Path path, List<Throwable> errors) throws ExecDirException {
     if (!errors.isEmpty()) {
       throw new ExecDirException(path, errors);
     }
   }

-  private static boolean treeContainsPath(
-      String directoryPath, Map<Digest, Directory> directoriesIndex, Digest rootDigest) {
-    Directory directory = directoriesIndex.get(rootDigest);
-    for (String name : directoryPath.split("/")) {
-      List<DirectoryNode> subdirs = directory.getDirectoriesList();
-      int index = Collections.binarySearch(Lists.transform(subdirs, DirectoryNode::getName), name);
-      if (index < 0) {
-        return false;
+  private static Iterator<String> directoriesIterator(
+      Digest digest, Map<Digest, Directory> directoriesIndex) {
+    Directory root = directoriesIndex.get(digest);
+    return new Iterator<String>() {
+      boolean atEnd = root.getDirectoriesCount() == 0;
+      Stack<String> path = new Stack<>();
+      Stack<Iterator<DirectoryNode>> route = new Stack<>();
+      Iterator<DirectoryNode> current = root.getDirectoriesList().iterator();
+
+      @Override
+      public boolean hasNext() {
+        return !atEnd;
       }
-      directory = directoriesIndex.get(subdirs.get(index).getDigest());
-    }
-    return true;
+
+      @Override
+      public String next() {
+        String nextPath;
+        DirectoryNode next = current.next();
+        String name = next.getName();
+        path.push(name);
+        nextPath = String.join("/", path);
+        Digest digest = next.getDigest();
+        if (digest.getSizeBytes() != 0) {
+          route.push(current);
+          current = directoriesIndex.get(digest).getDirectoriesList().iterator();
+        } else {
+          path.pop();
+        }
+        while (!current.hasNext() && !route.isEmpty()) {
+          current = route.pop();
+          path.pop();
+        }
+        atEnd = !current.hasNext();
+        return nextPath;
+      }
+    };
   }
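Since the configured directories are now compiled as regular expressions and tested with `matches()` against each slash-joined path the iterator above produces, matching is anchored at both ends rather than being a prefix lookup like the removed `treeContainsPath`. A small illustration (pattern and paths are hypothetical):

```java
import java.util.regex.Pattern;

class LinkedInputDirectoryMatchExample {
  public static void main(String[] args) {
    Pattern pattern = Pattern.compile("external(/.*)?");
    System.out.println(pattern.matcher("external").matches()); // true: the root itself
    System.out.println(pattern.matcher("external/foo/bar").matches()); // true: any subpath
    System.out.println(pattern.matcher("src/external").matches()); // false: match is anchored
  }
}
```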
-  private Iterable<String> realDirectories(
+  private Set<String> linkedDirectories(
       Map<Digest, Directory> directoriesIndex, Digest rootDigest) {
     // skip this search if all the directories are real
     if (linkInputDirectories) {
-      // somewhat inefficient, but would need many overrides to be painful
-      return filter(
-          realInputDirectories,
-          realInputDirectory -> treeContainsPath(realInputDirectory, directoriesIndex, rootDigest));
+      ImmutableSet.Builder<String> builder = ImmutableSet.builder();
+
+      Iterator<String> dirs = directoriesIterator(rootDigest, directoriesIndex);
+      while (dirs.hasNext()) {
+        String dir = dirs.next();
+        for (Pattern pattern : linkedInputDirectories) {
+          if (pattern.matcher(dir).matches()) {
+            builder.add(dir);
+            break; // avoid adding the same directory twice
+          }
+        }
+      }
+      return builder.build();
     }
-    return ImmutableList.of();
+    return ImmutableSet.of();
+  }
+
+  @VisibleForTesting
+  static OutputDirectory createOutputDirectory(Command command) {
+    Iterable<String> files;
+    Iterable<String> dirs;
+    if (command.getOutputPathsCount() != 0) {
+      files = command.getOutputPathsList();
+      dirs = ImmutableList.of(); // output paths require the action to create their own directory
+    } else {
+      files = command.getOutputFilesList();
+      dirs = command.getOutputDirectoriesList();
+    }
+    if (!command.getWorkingDirectory().isEmpty()) {
+      files = Iterables.transform(files, file -> command.getWorkingDirectory() + "/" + file);
+      dirs = Iterables.transform(dirs, dir -> command.getWorkingDirectory() + "/" + dir);
+    }
+    return OutputDirectory.parse(files, dirs, command.getEnvironmentVariablesList());
   }
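The precedence in `createOutputDirectory` is easiest to see in a short test-style sketch: REAPI v2.1 `output_paths` take priority over the legacy `output_files`/`output_directories`, and everything is keyed off the working directory (the values below are hypothetical, and the call assumes the same package as `CFCExecFileSystem`, mirroring how a test would reach the `@VisibleForTesting` method):

```java
import build.bazel.remote.execution.v2.Command;
import build.buildfarm.worker.OutputDirectory;

class CreateOutputDirectoryExample {
  static void example() {
    Command command =
        Command.newBuilder()
            .setWorkingDirectory("foo")
            .addOutputPaths("bar/out.txt")
            .build();
    // Only output_paths is consulted here; no directory entries are pre-created because
    // output_paths leaves parent-directory creation to the action itself. The effective
    // file path handed to OutputDirectory.parse is "foo/bar/out.txt".
    OutputDirectory outputDirectory = CFCExecFileSystem.createOutputDirectory(command);
  }
}
```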
   @Override
@@ -356,23 +426,25 @@ public Path createExecDir(
       throws IOException, InterruptedException {
     log.log(Level.FINEST, "ExecFileSystem::createExecDir(" + operationName + ")");
     Digest inputRootDigest = action.getInputRootDigest();
-    OutputDirectory outputDirectory =
-        OutputDirectory.parse(
-            command.getOutputFilesList(),
-            concat(
-                command.getOutputDirectoriesList(),
-                realDirectories(directoriesIndex, inputRootDigest)),
-            command.getEnvironmentVariablesList());
+    OutputDirectory outputDirectory = createOutputDirectory(command);

     Path execDir = root.resolve(operationName);
     if (Files.exists(execDir)) {
-      Directories.remove(execDir);
+      Directories.remove(execDir, fileStore);
     }
     Files.createDirectories(execDir);

     ImmutableList.Builder<String> inputFiles = new ImmutableList.Builder<>();
     ImmutableList.Builder<Digest> inputDirectories = new ImmutableList.Builder<>();

+    Set<Path> linkedInputDirectories =
+        ImmutableSet.copyOf(
+            Iterables.transform(
+                linkedDirectories(directoriesIndex, inputRootDigest),
+                path -> execDir.resolve(path))); // does this work on windows with / separators?
+
+    log.log(
+        Level.FINER, "ExecFileSystem::createExecDir(" + operationName + ") calling fetchInputs");
     // Get lock keys so we can increment them prior to downloading
     // and no other threads can create/delete during
     // eviction or the invocation of fetchInputs
@@ -381,11 +453,17 @@ public Path createExecDir(
     Iterable<ListenableFuture<Void>> fetchedFutures =
         fetchInputs(
+            execDir,
             execDir,
             inputRootDigest,
             directoriesIndex,
             outputDirectory,
-            inputFiles,
+            linkedInputDirectories,
+            key -> {
+              synchronized (inputFiles) {
+                inputFiles.add(key);
+              }
+            },
             inputDirectories);
     boolean success = false;
     try {
@@ -425,7 +503,7 @@ public Path createExecDir(
         log.log(Level.INFO, "Failed to create exec dir (" + operationName + "), cleaning up");
         fileCache.decrementReferences(inputFiles.build(), inputDirectories.build());
         fileCache.unlockKeys(lockedKeys);
-        Directories.remove(execDir);
+        Directories.remove(execDir, fileStore);
       }
     }

@@ -434,7 +512,7 @@ public Path createExecDir(
     rootInputDirectories.put(execDir, inputDirectories.build());

     log.log(
-        Level.FINE,
+        Level.FINER,
         "ExecFileSystem::createExecDir(" + operationName + ") stamping output directories");
     boolean stamped = false;
     try {
@@ -464,7 +542,7 @@ public void destroyExecDir(Path execDir) throws IOException, InterruptedException {
     }
     fileCache.unlockKeys(lockedKeys);
     if (Files.exists(execDir)) {
-      Directories.remove(execDir);
+      Directories.remove(execDir, fileStore);
     }
   }
 }
diff --git a/src/main/java/build/buildfarm/worker/shard/ExecFileSystem.java b/src/main/java/build/buildfarm/worker/shard/ExecFileSystem.java
index 916b43cef0..b55601d598 100644
--- a/src/main/java/build/buildfarm/worker/shard/ExecFileSystem.java
+++ b/src/main/java/build/buildfarm/worker/shard/ExecFileSystem.java
@@ -30,7 +30,7 @@ public interface ExecFileSystem extends InputStreamFactory {
   void start(Consumer<List<Digest>> onDigests, boolean skipLoad)
       throws IOException, InterruptedException;

-  void stop();
+  void stop() throws InterruptedException;

   Path root();

diff --git a/src/main/java/build/buildfarm/worker/shard/LocalCasWriter.java b/src/main/java/build/buildfarm/worker/shard/LocalCasWriter.java
index 4ab573b778..01881eea72 100644
--- a/src/main/java/build/buildfarm/worker/shard/LocalCasWriter.java
+++ b/src/main/java/build/buildfarm/worker/shard/LocalCasWriter.java
@@ -66,7 +66,7 @@ private void insertStream(Digest digest, IOSupplier<InputStream> suppliedStream)
     Write write = getLocalWrite(digest);

     try (OutputStream out =
-            write.getOutput(/* deadlineAfter=*/ 1, /* deadlineAfterUnits=*/ DAYS, () -> {});
+            write.getOutput(/* deadlineAfter= */ 1, /* deadlineAfterUnits= */ DAYS, () -> {});
         InputStream in = suppliedStream.get()) {
       ByteStreams.copy(in, out);
     } catch (IOException e) {
diff --git a/src/main/java/build/buildfarm/worker/shard/RemoteCasWriter.java b/src/main/java/build/buildfarm/worker/shard/RemoteCasWriter.java
index d7d5f0a776..0ba8db48b5 100644
--- a/src/main/java/build/buildfarm/worker/shard/RemoteCasWriter.java
+++ b/src/main/java/build/buildfarm/worker/shard/RemoteCasWriter.java
@@ -22,6 +22,7 @@
 import build.bazel.remote.execution.v2.Digest;
 import build.bazel.remote.execution.v2.DigestFunction;
 import build.bazel.remote.execution.v2.RequestMetadata;
+import build.buildfarm.backplane.Backplane;
 import build.buildfarm.common.Size;
 import build.buildfarm.common.Write;
 import build.buildfarm.common.grpc.Retrier;
@@ -49,13 +50,13 @@
 @Log
 public class RemoteCasWriter implements CasWriter {
-  private final Set<String> workerSet;
+  private final Backplane backplane;
   private final LoadingCache<String, Instance> workerStubs;
   private final Retrier retrier;

   public RemoteCasWriter(
-      Set<String> workerSet, LoadingCache<String, Instance> workerStubs, Retrier retrier) {
-    this.workerSet = workerSet;
+      Backplane backplane, LoadingCache<String, Instance> workerStubs, Retrier retrier) {
+    this.backplane = backplane;
     this.workerStubs = workerStubs;
     this.retrier = retrier;
   }
@@ -76,7 +77,7 @@ private void insertFileToCasMember(Digest digest, DigestFunction.Value digestFunction
       Throwable cause = e.getCause();
       Throwables.throwIfInstanceOf(cause, IOException.class);
       Throwables.throwIfUnchecked(cause);
-      throw new RuntimeException(cause);
+      throw new IOException(cause);
     }
   }

@@ -86,6 +87,7 @@ private long writeToCasMember(Digest digest, DigestFunction.Value digestFunction
     String workerName = getRandomWorker();
     Write write = getCasMemberWrite(digest, digestFunction, workerName);

+    write.reset();
     try {
       return streamIntoWriteFuture(in, write, digest).get();
     } catch (ExecutionException e) {
@@ -93,7 +95,7 @@ private long writeToCasMember(Digest digest, DigestFunction.Value digestFunction
       Throwables.throwIfInstanceOf(cause, IOException.class);
       // prevent a discard of this frame
       Status status = Status.fromThrowable(cause);
-      throw status.asRuntimeException();
+      throw new IOException(status.asException());
     }
   }

@@ -112,36 +114,30 @@ private Write getCasMemberWrite(
   @Override
   public void insertBlob(Digest digest, DigestFunction.Value digestFunction, ByteString content)
       throws IOException, InterruptedException {
-    insertBlobToCasMember(digest, digestFunction, content);
-  }
-
-  private void insertBlobToCasMember(Digest digest, DigestFunction.Value digestFunction, ByteString content)
-      throws IOException, InterruptedException {
     try (InputStream in = content.newInput()) {
       retrier.execute(() -> writeToCasMember(digest, digestFunction, in));
     } catch (RetryException e) {
       Throwable cause = e.getCause();
       Throwables.throwIfInstanceOf(cause, IOException.class);
       Throwables.throwIfUnchecked(cause);
-      throw new RuntimeException(cause);
+      throw new IOException(cause);
     }
   }

   private String getRandomWorker() throws IOException {
-    synchronized (workerSet) {
-      if (workerSet.isEmpty()) {
-        throw new RuntimeException("no available workers");
-      }
-      Random rand = new Random();
-      int index = rand.nextInt(workerSet.size());
-      // best case no allocation average n / 2 selection
-      Iterator<String> iter = workerSet.iterator();
-      String worker = null;
-      while (iter.hasNext() && index-- >= 0) {
-        worker = iter.next();
-      }
-      return worker;
+    Set<String> workerSet = backplane.getStorageWorkers();
+    if (workerSet.isEmpty()) {
+      throw new IOException("no available workers");
+    }
+    Random rand = new Random();
+    int index = rand.nextInt(workerSet.size());
+    // best case no allocation average n / 2 selection
+    Iterator<String> iter = workerSet.iterator();
+    String worker = null;
+    while (iter.hasNext() && index-- >= 0) {
+      worker = iter.next();
     }
+    return worker;
   }

   private Instance workerStub(String worker) {
@@ -164,8 +160,8 @@ private ListenableFuture<Long> streamIntoWriteFuture(InputStream in, Write write
     // the callback closes the stream and prepares the future.
FeedbackOutputStream out = write.getOutput( - /* deadlineAfter=*/ 1, - /* deadlineAfterUnits=*/ DAYS, + /* deadlineAfter= */ 1, + /* deadlineAfterUnits= */ DAYS, () -> { try { FeedbackOutputStream outStream = (FeedbackOutputStream) write; diff --git a/src/main/java/build/buildfarm/worker/shard/ShardCASFileCache.java b/src/main/java/build/buildfarm/worker/shard/ShardCASFileCache.java index d745a6194b..38f5dee79b 100644 --- a/src/main/java/build/buildfarm/worker/shard/ShardCASFileCache.java +++ b/src/main/java/build/buildfarm/worker/shard/ShardCASFileCache.java @@ -38,7 +38,6 @@ class ShardCASFileCache extends CASFileCache { long maxEntrySizeInBytes, int maxBucketLevels, boolean storeFileDirsIndexInMemory, - boolean publishTtlMetric, boolean execRootFallback, DigestUtil digestUtil, ExecutorService expireService, @@ -53,12 +52,11 @@ class ShardCASFileCache extends CASFileCache { maxEntrySizeInBytes, maxBucketLevels, storeFileDirsIndexInMemory, - publishTtlMetric, execRootFallback, digestUtil, expireService, accessRecorder, - /* storage=*/ Maps.newConcurrentMap(), + /* storage= */ Maps.newConcurrentMap(), DEFAULT_DIRECTORIES_INDEX_NAME, onPut, onExpire, diff --git a/src/main/java/build/buildfarm/worker/shard/ShardWorkerContext.java b/src/main/java/build/buildfarm/worker/shard/ShardWorkerContext.java index 27e169960a..398e76fc1c 100644 --- a/src/main/java/build/buildfarm/worker/shard/ShardWorkerContext.java +++ b/src/main/java/build/buildfarm/worker/shard/ShardWorkerContext.java @@ -18,6 +18,7 @@ import static build.buildfarm.common.Actions.checkPreconditionFailure; import static build.buildfarm.common.Errors.VIOLATION_TYPE_INVALID; import static build.buildfarm.common.Errors.VIOLATION_TYPE_MISSING; +import static build.buildfarm.worker.DequeueMatchEvaluator.shouldKeepOperation; import static java.lang.String.format; import static java.util.concurrent.TimeUnit.DAYS; @@ -32,12 +33,14 @@ import build.bazel.remote.execution.v2.ExecutionStage; import build.bazel.remote.execution.v2.FileNode; import build.bazel.remote.execution.v2.Platform; +import build.bazel.remote.execution.v2.SymlinkNode; import build.bazel.remote.execution.v2.Tree; import build.buildfarm.backplane.Backplane; import build.buildfarm.common.CommandUtils; import build.buildfarm.common.DigestUtil; import build.buildfarm.common.DigestUtil.ActionKey; import build.buildfarm.common.EntryLimitException; +import build.buildfarm.common.ExecutionProperties; import build.buildfarm.common.InputStreamFactory; import build.buildfarm.common.LinuxSandboxOptions; import build.buildfarm.common.Poller; @@ -54,13 +57,14 @@ import build.buildfarm.v1test.CASInsertionPolicy; import build.buildfarm.v1test.QueueEntry; import build.buildfarm.v1test.QueuedOperation; -import build.buildfarm.worker.DequeueMatchEvaluator; import build.buildfarm.worker.ExecutionPolicies; import build.buildfarm.worker.RetryingMatchListener; import build.buildfarm.worker.WorkerContext; import build.buildfarm.worker.cgroup.Cpu; import build.buildfarm.worker.cgroup.Group; import build.buildfarm.worker.cgroup.Mem; +import build.buildfarm.worker.resources.LocalResourceSet; +import build.buildfarm.worker.resources.LocalResourceSetUtils; import build.buildfarm.worker.resources.ResourceDecider; import build.buildfarm.worker.resources.ResourceLimits; import com.google.common.annotations.VisibleForTesting; @@ -79,6 +83,7 @@ import io.grpc.Status; import io.grpc.StatusException; import io.prometheus.client.Counter; +import java.io.File; import java.io.IOException; import 
java.io.InputStream;
 import java.nio.file.FileVisitResult;
@@ -93,6 +98,7 @@
 import java.util.Map;
 import java.util.Stack;
 import java.util.logging.Level;
+import javax.annotation.Nullable;
 import lombok.extern.java.Log;

 @Log
@@ -129,9 +135,11 @@ class ShardWorkerContext implements WorkerContext {
   private final Group operationsGroup = executionsGroup.getChild("operations");
   private final CasWriter writer;
   private final boolean errorOperationRemainingResources;
+  private final LocalResourceSet resourceSet;
+  private final boolean errorOperationOutputSizeExceeded;

   static SetMultimap<String, String> getMatchProvisions(
-      Iterable<ExecutionPolicy> policies, int executeStageWidth) {
+      Iterable<ExecutionPolicy> policies, String name, int executeStageWidth) {
     ImmutableSetMultimap.Builder<String, String> provisions = ImmutableSetMultimap.builder();
     Platform matchPlatform =
         ExecutionPolicies.getMatchPlatform(
@@ -140,6 +148,7 @@ static SetMultimap<String, String> getMatchProvisions(
       provisions.put(property.getName(), property.getValue());
     }
     provisions.put(PROVISION_CORES_NAME, String.format("%d", executeStageWidth));
+    provisions.put(ExecutionProperties.WORKER, name);
     return provisions.build();
   }

@@ -162,9 +171,11 @@ static SetMultimap<String, String> getMatchProvisions(
       boolean onlyMulticoreTests,
       boolean allowBringYourOwnContainer,
       boolean errorOperationRemainingResources,
+      boolean errorOperationOutputSizeExceeded,
+      LocalResourceSet resourceSet,
       CasWriter writer) {
     this.name = name;
-    this.matchProvisions = getMatchProvisions(policies, executeStageWidth);
+    this.matchProvisions = getMatchProvisions(policies, name, executeStageWidth);
     this.operationPollPeriod = operationPollPeriod;
     this.operationPoller = operationPoller;
     this.inputFetchStageWidth = inputFetchStageWidth;
@@ -182,6 +193,8 @@ static SetMultimap<String, String> getMatchProvisions(
     this.onlyMulticoreTests = onlyMulticoreTests;
     this.allowBringYourOwnContainer = allowBringYourOwnContainer;
     this.errorOperationRemainingResources = errorOperationRemainingResources;
+    this.errorOperationOutputSizeExceeded = errorOperationOutputSizeExceeded;
+    this.resourceSet = resourceSet;
     this.writer = writer;
   }

@@ -240,12 +253,12 @@ public void resumePoller(
           } else {
             operationPollerCounter.inc();
             log.log(
-                Level.INFO, format("%s: poller: Completed Poll for %s: OK", name, operationName));
+                Level.FINE, format("%s: poller: Completed Poll for %s: OK", name, operationName));
           }
           return success;
         },
         () -> {
-          log.log(Level.INFO, format("%s: poller: Deadline expired for %s", name, operationName));
+          log.log(Level.FINE, format("%s: poller: Deadline expired for %s", name, operationName));
           onFailure.run();
         },
         deadline);
@@ -273,6 +286,16 @@ public QueuedOperation getQueuedOperation(QueueEntry queueEntry)

   @SuppressWarnings("ConstantConditions")
   private void matchInterruptible(MatchListener listener) throws IOException, InterruptedException {
+    QueueEntry queueEntry = takeEntryOffOperationQueue(listener);
+    if (queueEntry == null || shouldKeepOperation(matchProvisions, resourceSet, queueEntry)) {
+      listener.onEntry(queueEntry);
+    } else {
+      backplane.rejectOperation(queueEntry);
+    }
+  }
+
+  private @Nullable QueueEntry takeEntryOffOperationQueue(MatchListener listener)
+      throws IOException, InterruptedException {
     listener.onWaitStart();
     QueueEntry queueEntry = null;
     try {
@@ -294,16 +317,12 @@ private void matchInterruptible(MatchListener listener) throws IOException, InterruptedException {
       // transient backplane errors will propagate a null queueEntry
     }
     listener.onWaitEnd();
+    return queueEntry;
+  }

-    if (queueEntry == null
-        || DequeueMatchEvaluator.shouldKeepOperation(matchProvisions,
queueEntry)) { - listener.onEntry(queueEntry); - } else { - backplane.rejectOperation(queueEntry); - } - if (Thread.interrupted()) { - throw new InterruptedException(); - } + @Override + public void returnLocalResources(QueueEntry queueEntry) { + LocalResourceSetUtils.releaseClaims(queueEntry.getPlatform(), resourceSet); } @Override @@ -314,7 +333,7 @@ public void match(MatchListener listener) throws InterruptedException { @Override public boolean getMatched() { - return !matched; + return matched; } @Override @@ -328,20 +347,28 @@ public void onWaitEnd() { } @Override - public boolean onEntry(QueueEntry queueEntry) throws InterruptedException { + public boolean onEntry(@Nullable QueueEntry queueEntry) throws InterruptedException { if (queueEntry == null) { matched = true; return listener.onEntry(null); } + return onValidEntry(queueEntry); + } + + private boolean onValidEntry(QueueEntry queueEntry) throws InterruptedException { String operationName = queueEntry.getExecuteEntry().getOperationName(); if (activeOperations.putIfAbsent(operationName, queueEntry) != null) { log.log(Level.WARNING, "matched duplicate operation " + operationName); return false; } + return onUniqueEntry(queueEntry); + } + + private boolean onUniqueEntry(QueueEntry queueEntry) throws InterruptedException { matched = true; boolean success = listener.onEntry(queueEntry); if (!success) { - requeue(operationName); + requeue(queueEntry.getExecuteEntry().getOperationName()); } return success; } @@ -351,13 +378,8 @@ public void onError(Throwable t) { Throwables.throwIfUnchecked(t); throw new RuntimeException(t); } - - @Override - public void setOnCancelHandler(Runnable onCancelHandler) { - listener.setOnCancelHandler(onCancelHandler); - } }; - while (dedupMatchListener.getMatched()) { + while (!dedupMatchListener.getMatched()) { try { matchInterruptible(dedupMatchListener); } catch (IOException e) { @@ -477,15 +499,26 @@ private void updateActionResultStdOutputs(ActionResult.Builder resultBuilder) } } + private static String toREOutputPath(String nativePath) { + // RE API OutputFile/Directory path + // The path separator is a forward slash `/`. 
+    if (File.separatorChar != '/') {
+      return nativePath.replace(File.separatorChar, '/');
+    }
+    return nativePath;
+  }
+
   private void uploadOutputFile(
       ActionResult.Builder resultBuilder,
       Path outputPath,
-      Path actionRoot,
+      Path workingDirectory,
+      String entrySizeViolationType,
       PreconditionFailure.Builder preconditionFailure)
       throws IOException, InterruptedException {
-    String outputFile = actionRoot.relativize(outputPath).toString();
+    String outputFile = toREOutputPath(workingDirectory.relativize(outputPath).toString());
+
     if (!Files.exists(outputPath)) {
-      log.log(Level.FINE, "ReportResultStage: " + outputFile + " does not exist...");
+      log.log(Level.FINER, "ReportResultStage: " + outputFile + " does not exist...");
       return;
     }

@@ -493,7 +526,7 @@ private void uploadOutputFile(
       String message =
           String.format(
               "ReportResultStage: %s is a directory but it should have been a file", outputPath);
-      log.log(Level.FINE, message);
+      log.log(Level.FINER, message);
       preconditionFailure
           .addViolationsBuilder()
           .setType(VIOLATION_TYPE_INVALID)
@@ -507,11 +540,12 @@ private void uploadOutputFile(
     if (maxEntrySize != UNLIMITED_ENTRY_SIZE_MAX && size > maxEntrySize) {
       String message =
           String.format(
-              "ReportResultStage: The output %s could not be uploaded because it exceeded the maximum size of an entry (%d > %d)",
+              "ReportResultStage: The output %s could not be uploaded because it exceeded the"
+                  + " maximum size of an entry (%d > %d)",
               outputPath, size, maxEntrySize);
       preconditionFailure
           .addViolationsBuilder()
-          .setType(VIOLATION_TYPE_MISSING)
+          .setType(entrySizeViolationType)
           .setSubject(outputFile + ": " + size)
           .setDescription(message);
       return;
@@ -539,7 +573,7 @@ private void uploadOutputFile(
     } catch (EntryLimitException e) {
       preconditionFailure
           .addViolationsBuilder()
-          .setType(VIOLATION_TYPE_MISSING)
+          .setType(entrySizeViolationType)
           .setSubject("blobs/" + DigestUtil.toString(digest))
           .setDescription(
               "An output could not be uploaded because it exceeded the maximum size of an entry");
@@ -550,6 +584,7 @@ private void uploadOutputFile(
   static class OutputDirectoryContext {
     private final List<FileNode> files = new ArrayList<>();
     private final List<DirectoryNode> directories = new ArrayList<>();
+    private final List<SymlinkNode> symlinks = new ArrayList<>();

     void addFile(FileNode fileNode) {
       files.add(fileNode);
@@ -559,27 +594,38 @@ void addDirectory(DirectoryNode directoryNode) {
       directories.add(directoryNode);
     }

+    void addSymlink(SymlinkNode symlinkNode) {
+      symlinks.add(symlinkNode);
+    }
+
     Directory toDirectory() {
       files.sort(Comparator.comparing(FileNode::getName));
       directories.sort(Comparator.comparing(DirectoryNode::getName));
-      return Directory.newBuilder().addAllFiles(files).addAllDirectories(directories).build();
+      symlinks.sort(Comparator.comparing(SymlinkNode::getName));
+      return Directory.newBuilder()
+          .addAllFiles(files)
+          .addAllDirectories(directories)
+          .addAllSymlinks(symlinks)
+          .build();
     }
   }

   private void uploadOutputDirectory(
       ActionResult.Builder resultBuilder,
       Path outputDirPath,
-      Path actionRoot,
+      Path workingDirectory,
+      String entrySizeViolationType,
       PreconditionFailure.Builder preconditionFailure)
       throws IOException, InterruptedException {
-    String outputDir = actionRoot.relativize(outputDirPath).toString();
+    String outputDir = toREOutputPath(workingDirectory.relativize(outputDirPath).toString());
+
     if (!Files.exists(outputDirPath)) {
-      log.log(Level.FINE, "ReportResultStage: " + outputDir + " does not exist...");
+      log.log(Level.FINER, "ReportResultStage: " + outputDir + " does not exist...");
return; } if (!Files.isDirectory(outputDirPath)) { - log.log(Level.FINE, "ReportResultStage: " + outputDir + " is not a directory..."); + log.log(Level.FINER, "ReportResultStage: " + outputDir + " is not a directory..."); preconditionFailure .addViolationsBuilder() .setType(VIOLATION_TYPE_INVALID) @@ -599,8 +645,30 @@ private void uploadOutputDirectory( @Override public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + if (configs.getWorker().isCreateSymlinkOutputs() && attrs.isSymbolicLink()) { + visitSymbolicLink(file); + } else { + visitRegularFile(file, attrs); + } + return FileVisitResult.CONTINUE; + } + + private void visitSymbolicLink(Path file) throws IOException { + // TODO convert symlinks with absolute targets within execution root to relative ones + currentDirectory.addSymlink( + SymlinkNode.newBuilder() + .setName(file.getFileName().toString()) + .setTarget(Files.readSymbolicLink(file).toString()) + .build()); + } + + private void visitRegularFile(Path file, BasicFileAttributes attrs) throws IOException { Digest digest; try { + // should we create symlink nodes in output? + // is buildstream trying to execute in a specific container?? + // can get to NSFE for nonexistent symlinks + // can fail outright for a symlink to a directory digest = getDigestUtil().compute(file); } catch (NoSuchFileException e) { log.log( @@ -609,7 +677,7 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) "error visiting file %s under output dir %s", outputDirPath.relativize(file), outputDirPath.toAbsolutePath()), e); - return FileVisitResult.CONTINUE; + return; } // should we cast to PosixFilePermissions and do gymnastics there for executable? @@ -628,12 +696,12 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) } catch (EntryLimitException e) { preconditionFailure .addViolationsBuilder() - .setType(VIOLATION_TYPE_MISSING) + .setType(entrySizeViolationType) .setSubject("blobs/" + DigestUtil.toString(digest)) .setDescription( - "An output could not be uploaded because it exceeded the maximum size of an entry"); + "An output could not be uploaded because it exceeded the maximum size of an" + + " entry"); } - return FileVisitResult.CONTINUE; } @Override @@ -676,14 +744,28 @@ public FileVisitResult postVisitDirectory(Path dir, IOException exc) { public void uploadOutputs( Digest actionDigest, ActionResult.Builder resultBuilder, Path actionRoot, Command command) throws IOException, InterruptedException, StatusException { + String entrySizeViolationType = + errorOperationOutputSizeExceeded ? 
VIOLATION_TYPE_INVALID : VIOLATION_TYPE_MISSING;
+
     PreconditionFailure.Builder preconditionFailure = PreconditionFailure.newBuilder();
-    List<Path> outputPaths = CommandUtils.getResolvedOutputPaths(command, actionRoot);
+    Path workingDirectory = actionRoot.resolve(command.getWorkingDirectory());
+    List<Path> outputPaths = CommandUtils.getResolvedOutputPaths(command, workingDirectory);
     for (Path outputPath : outputPaths) {
       if (Files.isDirectory(outputPath)) {
-        uploadOutputDirectory(resultBuilder, outputPath, actionRoot, preconditionFailure);
+        uploadOutputDirectory(
+            resultBuilder,
+            outputPath,
+            workingDirectory,
+            entrySizeViolationType,
+            preconditionFailure);
       } else {
-        uploadOutputFile(resultBuilder, outputPath, actionRoot, preconditionFailure);
+        uploadOutputFile(
+            resultBuilder,
+            outputPath,
+            workingDirectory,
+            entrySizeViolationType,
+            preconditionFailure);
       }
     }
     checkPreconditionFailure(actionDigest, preconditionFailure.build());
@@ -702,7 +784,7 @@ public boolean putOperation(Operation operation) throws IOException, InterruptedException {
     boolean success = createBackplaneRetrier().execute(() -> instance.putOperation(operation));
     if (success && operation.getDone()) {
       completedOperations.inc();
-      log.log(Level.FINE, "CompletedOperation: " + operation.getName());
+      log.log(Level.FINER, "CompletedOperation: " + operation.getName());
     }
     return success;
   }
@@ -763,7 +845,7 @@ boolean shouldLimitCoreUsage() {

   @Override
   public void createExecutionLimits() {
-    if (shouldLimitCoreUsage()) {
+    if (shouldLimitCoreUsage() && configs.getWorker().getSandboxSettings().isAlwaysUseCgroups()) {
       createOperationExecutionLimits();
     }
   }
@@ -795,11 +877,13 @@ void createOperationExecutionLimits() {

   @Override
   public void destroyExecutionLimits() {
-    try {
-      operationsGroup.getCpu().close();
-      executionsGroup.getCpu().close();
-    } catch (IOException e) {
-      throw new RuntimeException(e);
+    if (configs.getWorker().getSandboxSettings().isAlwaysUseCgroups()) {
+      try {
+        operationsGroup.getCpu().close();
+        executionsGroup.getCpu().close();
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      }
     }
   }

@@ -857,28 +941,31 @@ IOResource limitSpecifiedExecution(
     // ResourceLimits object. We apply the cgroup settings to file resources
     // and collect group names to use on the CLI.
     String operationId = getOperationId(operationName);
-    final Group group = operationsGroup.getChild(operationId);
     ArrayList<IOResource> resources = new ArrayList<>();
-    ArrayList<String> usedGroups = new ArrayList<>();
-    // Possibly set core restrictions.
-    if (limits.cpu.limit) {
-      applyCpuLimits(group, limits, resources);
-      usedGroups.add(group.getCpu().getName());
-    }
+    if (limits.cgroups) {
+      final Group group = operationsGroup.getChild(operationId);
+      ArrayList<String> usedGroups = new ArrayList<>();
-    // Possibly set memory restrictions.
-    if (limits.mem.limit) {
-      applyMemLimits(group, limits, resources);
-      usedGroups.add(group.getMem().getName());
-    }
+      // Possibly set core restrictions.
+      if (limits.cpu.limit) {
+        applyCpuLimits(group, limits, resources);
+        usedGroups.add(group.getCpu().getName());
+      }
+
+      // Possibly set memory restrictions.
+      if (limits.mem.limit) {
+        applyMemLimits(group, limits, resources);
+        usedGroups.add(group.getMem().getName());
+      }
-    // Decide the CLI for running under cgroups
-    if (!usedGroups.isEmpty()) {
-      arguments.add(
-          configs.getExecutionWrappers().getCgroups(),
-          "-g",
-          String.join(",", usedGroups) + ":" + group.getHierarchy());
+      // Decide the CLI for running under cgroups
+      if (!usedGroups.isEmpty()) {
+        arguments.add(
+            configs.getExecutionWrappers().getCgroups(),
+            "-g",
+            String.join(",", usedGroups) + ":" + group.getHierarchy());
+      }
     }

     // Possibly set network restrictions.
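When `limits.cgroups` holds and any controller group was populated, the execution command line gets prefixed with the configured cgroups wrapper. A rough sketch of the resulting prefix, assuming the wrapper resolves to cgexec(1) and illustrative controller and hierarchy names (all of these values are configuration- and platform-dependent, not taken from this diff):

```java
import com.google.common.collect.ImmutableList;

class CgroupsWrapperPrefixExample {
  static ImmutableList<String> examplePrefix() {
    // Hypothetical values for getCgroups(), the joined group names, and getHierarchy().
    String cgroupsWrapper = "/usr/bin/cgexec";
    String usedGroups = "cpu,memory";
    String hierarchy = "executions/operations/<operationId>";
    // cgexec's -g flag takes controllers:path; the action's own argv follows this prefix.
    return ImmutableList.of(cgroupsWrapper, "-g", usedGroups + ":" + hierarchy);
  }
}
```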
@@ -896,6 +983,10 @@ IOResource limitSpecifiedExecution(
       addLinuxSandboxCli(arguments, options);
     }

+    if (configs.getWorker().getSandboxSettings().isAlwaysUseAsNobody() || limits.fakeUsername) {
+      arguments.add(configs.getExecutionWrappers().getAsNobody());
+    }
+
     if (limits.time.skipSleep) {
       arguments.add(configs.getExecutionWrappers().getSkipSleep());

@@ -934,13 +1025,11 @@ private LinuxSandboxOptions decideLinuxSandboxOptions(
     // TODO: provide proper support for bazel sandbox's fakeUsername "-U" flag.
     // options.fakeUsername = limits.fakeUsername;

-    // these were hardcoded in bazel based on a filesystem configuration typical to ours
-    // TODO: they may be incorrect for say Windows, and support will need adjusted in the future.
-    options.writableFiles.add("/tmp");
-    options.writableFiles.add("/dev/shm");
+    options.writableFiles.addAll(
+        configs.getWorker().getSandboxSettings().getAdditionalWritePaths());

     if (limits.tmpFs) {
-      options.tmpfsDirs.add("/tmp");
+      options.writableFiles.addAll(configs.getWorker().getSandboxSettings().getTmpFsPaths());
     }

     if (limits.debugAfterExecution) {
@@ -960,8 +1049,6 @@ private LinuxSandboxOptions decideLinuxSandboxOptions(

   private void addLinuxSandboxCli(
       ImmutableList.Builder<String> arguments, LinuxSandboxOptions options) {
-    arguments.add(configs.getExecutionWrappers().getAsNobody());
-
     // Choose the sandbox which is built and deployed with the worker image.
     arguments.add(configs.getExecutionWrappers().getLinuxSandbox());
diff --git a/src/main/java/build/buildfarm/worker/shard/ShutDownWorkerGracefully.java b/src/main/java/build/buildfarm/worker/shard/ShutDownWorkerGracefully.java
index 63b1b205cb..7d394c9731 100644
--- a/src/main/java/build/buildfarm/worker/shard/ShutDownWorkerGracefully.java
+++ b/src/main/java/build/buildfarm/worker/shard/ShutDownWorkerGracefully.java
@@ -14,19 +14,14 @@

 package build.buildfarm.worker.shard;

-import static java.util.logging.Level.WARNING;
-
-import build.buildfarm.common.config.BuildfarmConfigs;
 import build.buildfarm.v1test.PrepareWorkerForGracefulShutDownRequest;
 import build.buildfarm.v1test.PrepareWorkerForGracefulShutDownRequestResults;
 import build.buildfarm.v1test.ShutDownWorkerGrpc;
 import io.grpc.stub.StreamObserver;
-import java.util.concurrent.CompletableFuture;
 import lombok.extern.java.Log;

 @Log
 public class ShutDownWorkerGracefully extends ShutDownWorkerGrpc.ShutDownWorkerImplBase {
-  private static BuildfarmConfigs configs = BuildfarmConfigs.getInstance();
   private final Worker worker;

   public ShutDownWorkerGracefully(Worker worker) {
@@ -44,35 +39,8 @@ public ShutDownWorkerGracefully(Worker worker) {
   public void prepareWorkerForGracefulShutdown(
       PrepareWorkerForGracefulShutDownRequest request,
       StreamObserver<PrepareWorkerForGracefulShutDownRequestResults> responseObserver) {
-    String clusterId = configs.getServer().getClusterId();
-    String clusterEndpoint = configs.getServer().getAdmin().getClusterEndpoint();
-    if (clusterId == null
-        || clusterId.equals("")
-        || clusterEndpoint == null
-        || clusterEndpoint.equals("")) {
-      String errorMessage =
-          String.format(
-              "Current AdminConfig doesn't have cluster_id or cluster_endpoint set, "
-                  + "the worker %s won't be shut down.",
-              configs.getWorker().getPublicName());
-      log.log(WARNING, errorMessage);
-      responseObserver.onError(new RuntimeException(errorMessage));
-      return;
-    }
-
-    if (!configs.getServer().getAdmin().isEnableGracefulShutdown()) {
-      String errorMessage =
-          String.format(
-              "Current AdminConfig doesn't support shut down worker gracefully, "
-                  + "the worker %s won't be shut down.",
-              configs.getWorker().getPublicName());
-      log.log(WARNING, errorMessage);
-      responseObserver.onError(new RuntimeException(errorMessage));
-      return;
-    }
-
     try {
-      CompletableFuture.runAsync(worker::prepareWorkerForGracefulShutdown);
+      worker.initiateShutdown();
       responseObserver.onNext(PrepareWorkerForGracefulShutDownRequestResults.newBuilder().build());
       responseObserver.onCompleted();
     } catch (Exception e) {
diff --git a/src/main/java/build/buildfarm/worker/shard/Worker.java b/src/main/java/build/buildfarm/worker/shard/Worker.java
index 3e1ccb6669..60f8cf34d4 100644
--- a/src/main/java/build/buildfarm/worker/shard/Worker.java
+++ b/src/main/java/build/buildfarm/worker/shard/Worker.java
@@ -21,14 +21,12 @@
 import static com.google.common.base.Preconditions.checkArgument;
 import static com.google.common.base.Preconditions.checkState;
 import static java.util.concurrent.Executors.newSingleThreadExecutor;
-import static java.util.concurrent.Executors.newSingleThreadScheduledExecutor;
 import static java.util.concurrent.TimeUnit.SECONDS;
 import static java.util.logging.Level.INFO;
 import static java.util.logging.Level.SEVERE;

 import build.bazel.remote.execution.v2.Compressor;
 import build.bazel.remote.execution.v2.Digest;
-import build.buildfarm.admin.aws.AwsAdmin;
 import build.buildfarm.backplane.Backplane;
 import build.buildfarm.cas.ContentAddressableStorage;
 import
build.buildfarm.cas.ContentAddressableStorage.Blob; @@ -37,11 +35,13 @@ import build.buildfarm.common.BuildfarmExecutors; import build.buildfarm.common.DigestUtil; import build.buildfarm.common.InputStreamFactory; +import build.buildfarm.common.LoggingMain; import build.buildfarm.common.config.BuildfarmConfigs; import build.buildfarm.common.config.Cas; import build.buildfarm.common.config.GrpcMetrics; import build.buildfarm.common.grpc.Retrier; import build.buildfarm.common.grpc.Retrier.Backoff; +import build.buildfarm.common.grpc.TracingMetadataUtils.ServerHeadersInterceptor; import build.buildfarm.common.services.ByteStreamService; import build.buildfarm.common.services.ContentAddressableStorageService; import build.buildfarm.instance.Instance; @@ -58,10 +58,10 @@ import build.buildfarm.worker.PipelineStage; import build.buildfarm.worker.PutOperationStage; import build.buildfarm.worker.ReportResultStage; +import build.buildfarm.worker.SuperscalarPipelineStage; +import build.buildfarm.worker.resources.LocalResourceSetUtils; import com.google.common.cache.LoadingCache; import com.google.common.collect.Lists; -import com.google.common.util.concurrent.SettableFuture; -import com.google.devtools.common.options.OptionsParsingException; import com.google.longrunning.Operation; import com.google.protobuf.ByteString; import com.google.protobuf.Duration; @@ -70,13 +70,16 @@ import io.grpc.Status; import io.grpc.Status.Code; import io.grpc.health.v1.HealthCheckResponse.ServingStatus; -import io.grpc.services.HealthStatusManager; +import io.grpc.protobuf.services.HealthStatusManager; +import io.grpc.protobuf.services.ProtoReflectionService; import io.prometheus.client.Counter; import io.prometheus.client.Gauge; import java.io.File; import java.io.IOException; import java.nio.file.FileSystem; +import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.attribute.BasicFileAttributes; import java.nio.file.attribute.UserPrincipal; import java.util.Arrays; import java.util.List; @@ -84,24 +87,14 @@ import java.util.UUID; import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.logging.Level; import javax.annotation.Nullable; -import javax.annotation.PostConstruct; -import javax.annotation.PreDestroy; import javax.naming.ConfigurationException; import lombok.extern.java.Log; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.SpringApplication; -import org.springframework.boot.autoconfigure.SpringBootApplication; -import org.springframework.context.ApplicationContext; -import org.springframework.context.annotation.ComponentScan; @Log -@SpringBootApplication -@ComponentScan("build.buildfarm") -public class Worker { +public final class Worker extends LoggingMain { private static final java.util.logging.Logger nettyLogger = java.util.logging.Logger.getLogger("io.grpc.netty"); private static final Counter healthCheckMetric = @@ -130,9 +123,8 @@ public class Worker { private boolean inGracefulShutdown = false; private boolean isPaused = false; - private ShardWorkerInstance instance; + private WorkerInstance instance; - @SuppressWarnings("deprecation") private final HealthStatusManager healthStatusManager = new HealthStatusManager(); private Server server; @@ -142,92 +134,57 @@ public class Worker { private Pipeline pipeline; private Backplane backplane; 
   private LoadingCache<String, Instance> workerStubs;
-  @Autowired private AwsAdmin awsAdmin;
+  private AtomicBoolean released = new AtomicBoolean(true);

-  @Autowired private ApplicationContext springContext;
   /**
-   * The method will prepare the worker for graceful shutdown and send out grpc request to disable
-   * scale in protection when the worker is ready. If unexpected errors happened, it will cancel the
-   * graceful shutdown progress make the worker available again.
+   * The method will prepare the worker for graceful shutdown when the worker is ready. Note:
+   * stderr is used here instead of the log because, by the time this is called in PreDestroy, the
+   * log is no longer available and is not logging messages.
    */
   public void prepareWorkerForGracefulShutdown() {
-    inGracefulShutdown = true;
-    log.log(
-        Level.INFO,
-        "The current worker will not be registered again and should be shutdown gracefully!");
-    pipeline.stopMatchingOperations();
-    int scanRate = 30; // check every 30 seconds
-    int timeWaited = 0;
-    int timeOut = 60 * 15; // 15 minutes
-
-    try {
-      while (!pipeline.isEmpty() && timeWaited < timeOut) {
-        SECONDS.sleep(scanRate);
-        timeWaited += scanRate;
-        log.log(INFO, String.format("Pipeline is still not empty after %d seconds.", timeWaited));
-      }
-    } catch (InterruptedException e) {
-      log.log(Level.SEVERE, "The worker gracefully shutdown is interrupted: " + e.getMessage());
-    } finally {
-      // make a grpc call to disable scale protection
-      String clusterEndpoint = configs.getServer().getAdmin().getClusterEndpoint();
-      log.log(
-          INFO,
-          String.format(
-              "It took the worker %d seconds to %s",
-              timeWaited,
-              pipeline.isEmpty() ? "finish all actions" : "but still cannot finish all actions"));
+    if (configs.getWorker().getGracefulShutdownSeconds() == 0) {
+      log.info(
+          "Graceful Shutdown is not enabled. Worker is shutting down without finishing executions"
+              + " in progress.");
+    } else {
+      inGracefulShutdown = true;
+      log.info(
+          "Graceful Shutdown - The current worker will not be registered again and should be"
+              + " shutdown gracefully!");
+      pipeline.stopMatchingOperations();
+      int scanRate = 30; // check every 30 seconds
+      int timeWaited = 0;
+      int timeOut = configs.getWorker().getGracefulShutdownSeconds();
       try {
-        awsAdmin.disableHostScaleInProtection(clusterEndpoint, configs.getWorker().getPublicName());
-      } catch (Exception e) {
-        log.log(
-            SEVERE,
+        if (pipeline.isEmpty()) {
+          log.info("Graceful Shutdown - no work in the pipeline.");
+        } else {
+          log.info("Graceful Shutdown - waiting for executions to finish.");
+        }
+        while (!pipeline.isEmpty() && timeWaited < timeOut) {
+          SECONDS.sleep(scanRate);
+          timeWaited += scanRate;
+          log.info(
+              String.format(
+                  "Graceful Shutdown - Pipeline is still not empty after %d seconds.", timeWaited));
+        }
+      } catch (InterruptedException e) {
+        log.info(
+            "Graceful Shutdown - the worker's graceful shutdown was interrupted: "
+                + e.getMessage());
+      } finally {
+        log.info(
             String.format(
-                "gRPC call to AdminService to disable scale in protection failed with exception: %s and stacktrace %s",
-                e.getMessage(), Arrays.toString(e.getStackTrace())));
-        // Gracefully shutdown cannot be performed successfully because of error in
-        // AdminService side. Under this scenario, the worker has to be added back to the worker
-        // pool.
-        inGracefulShutdown = false;
+                "Graceful Shutdown - It took the worker %d seconds to %s",
+                timeWaited,
+                pipeline.isEmpty()
+                    ? "finish all actions"
+                    : "gracefully shutdown but still cannot finish all actions"));
+      }
     }
   }
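With the admin-config gating removed, the ShutDownWorker endpoint now initiates this drain unconditionally and the worker waits up to `gracefulShutdownSeconds` for the pipeline to empty. A minimal client sketch for exercising the endpoint (host, port, and plaintext transport are assumptions):

```java
import build.buildfarm.v1test.PrepareWorkerForGracefulShutDownRequest;
import build.buildfarm.v1test.ShutDownWorkerGrpc;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;

class GracefulShutdownClientExample {
  public static void main(String[] args) {
    ManagedChannel channel =
        ManagedChannelBuilder.forAddress("worker-host", 8981).usePlaintext().build();
    try {
      // Fire the one-shot shutdown request; the worker then drains its pipeline for up to
      // gracefulShutdownSeconds, as implemented in prepareWorkerForGracefulShutdown above.
      ShutDownWorkerGrpc.newBlockingStub(channel)
          .prepareWorkerForGracefulShutdown(
              PrepareWorkerForGracefulShutDownRequest.getDefaultInstance());
    } finally {
      channel.shutdownNow();
    }
  }
}
```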
"finish all actions" + : "gracefully shutdown but still cannot finish all actions")); } } } - private void exitPostPipelineFailure() { - // Shutdown the worker if a pipeline fails. By means of the spring lifecycle - // hooks - e.g. the `PreDestroy` hook here - it will attempt to gracefully - // spin down the pipeline - - // By calling these spring shutdown facilities; we're open to the risk that - // a subsystem may be hanging a criticial thread indeffinitly. Deadline the - // shutdown workflow to ensure we don't leave a zombie worker in this - // situation - ScheduledExecutorService shutdownDeadlineExecutor = newSingleThreadScheduledExecutor(); - - // This may be shorter than the action timeout; assume we have interrupted - // actions in a fatal uncaught exception. - int forceShutdownDeadline = 60; - ScheduledFuture termFuture = - shutdownDeadlineExecutor.schedule( - new Runnable() { - public void run() { - log.log( - Level.SEVERE, - String.format( - "Force terminating due to shutdown deadline exceeded (%d seconds)", - forceShutdownDeadline)); - System.exit(1); - } - }, - forceShutdownDeadline, - SECONDS); - - // Consider defining exit codes to better afford out of band instance - // recovery - int code = SpringApplication.exit(springContext, () -> 1); - termFuture.cancel(false); - shutdownDeadlineExecutor.shutdown(); - System.exit(code); + private Worker() { + super("BuildFarmShardWorker"); } private Operation stripOperation(Operation operation) { @@ -240,7 +197,7 @@ private Operation stripQueuedOperation(Operation operation) { private Server createServer( ServerBuilder serverBuilder, - ContentAddressableStorage storage, + @Nullable CASFileCache storage, Instance instance, Pipeline pipeline, ShardWorkerContext context) { @@ -248,19 +205,20 @@ private Server createServer( serverBuilder.addService(new ContentAddressableStorageService(instance)); serverBuilder.addService(new ByteStreamService(instance)); serverBuilder.addService(new ShutDownWorkerGracefully(this)); + serverBuilder.addService(ProtoReflectionService.newInstance()); // We will build a worker's server based on it's capabilities. // A worker that is capable of execution will construct an execution pipeline. // It will use various execution phases for it's profile service. // On the other hand, a worker that is only capable of CAS storage does not need a pipeline. 
if (configs.getWorker().getCapabilities().isExecution()) { - PipelineStage completeStage = - new PutOperationStage((operation) -> context.deactivate(operation.getName())); + PutOperationStage completeStage = + new PutOperationStage(operation -> context.deactivate(operation.getName())); PipelineStage errorStage = completeStage; /* new ErrorStage(); */ PipelineStage reportResultStage = new ReportResultStage(context, completeStage, errorStage); - PipelineStage executeActionStage = + SuperscalarPipelineStage executeActionStage = new ExecuteActionStage(context, reportResultStage, errorStage); - PipelineStage inputFetchStage = + SuperscalarPipelineStage inputFetchStage = new InputFetchStage(context, executeActionStage, new PutOperationStage(context::requeue)); PipelineStage matchStage = new MatchStage(context, inputFetchStage, errorStage); @@ -271,9 +229,16 @@ private Server createServer( serverBuilder.addService( new WorkerProfileService( - storage, inputFetchStage, executeActionStage, context, completeStage, backplane)); + storage, + matchStage, + inputFetchStage, + executeActionStage, + reportResultStage, + completeStage, + backplane)); } GrpcMetrics.handleGrpcMetricIntercepts(serverBuilder, configs.getWorker().getGrpcMetrics()); + serverBuilder.intercept(new ServerHeadersInterceptor()); return serverBuilder.build(); } @@ -396,7 +361,6 @@ private ContentAddressableStorage createStorage( // delegate level cas.getHexBucketLevels(), cas.isFileDirectoriesIndexInMemory(), - cas.isPublishTtlMetric(), cas.isExecRootCopyFallback(), digestUtil, removeDirectoryService, @@ -418,11 +382,12 @@ private ExecFileSystem createCFCExecFileSystem( fileCache, owner, configs.getWorker().isLinkInputDirectories(), - configs.getWorker().getRealInputDirectories(), + configs.getWorker().getLinkedInputDirectories(), + configs.isAllowSymlinkTargetAbsolute(), removeDirectoryService, accessRecorder - /* deadlineAfter=*/ - /* deadlineAfterUnits=*/ ); + /* deadlineAfter= */ + /* deadlineAfterUnits= */ ); } private void onStoragePut(Digest digest) { @@ -493,6 +458,7 @@ private void startFailsafeRegistration() { String endpoint = configs.getWorker().getPublicName(); ShardWorker.Builder worker = ShardWorker.newBuilder().setEndpoint(endpoint); worker.setWorkerType(configs.getWorker().getWorkerType()); + worker.setFirstRegisteredAt(loadWorkerStartTimeInMillis()); int registrationIntervalMillis = 10000; int registrationOffsetMillis = registrationIntervalMillis * 3; new Thread( @@ -543,13 +509,6 @@ public void run() { } } catch (InterruptedException e) { // ignore - } finally { - try { - stop(); - } catch (InterruptedException ie) { - log.log(SEVERE, "interrupted while stopping worker", ie); - // ignore - } } } }, @@ -557,7 +516,19 @@ public void run() { .start(); } + private long loadWorkerStartTimeInMillis() { + try { + File cache = new File(configs.getWorker().getRoot() + "/cache"); + return Files.readAttributes(cache.toPath(), BasicFileAttributes.class) + .creationTime() + .toMillis(); + } catch (IOException e) { + return System.currentTimeMillis(); + } + } + public void start() throws ConfigurationException, InterruptedException, IOException { + released.set(false); String session = UUID.randomUUID().toString(); ServerBuilder serverBuilder = ServerBuilder.forPort(configs.getWorker().getPort()); String identifier = "buildfarm-worker-" + configs.getWorker().getPublicName() + "-" + session; @@ -570,7 +541,12 @@ public void start() throws ConfigurationException, InterruptedException, IOExcep if 
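The execution pipeline above is built back to front: each stage receives its successor at construction time, so the terminal PutOperationStage is created first and MatchStage last. A simplified model of that reverse-order wiring; LinkedStage is a stand-in for buildfarm's PipelineStage, not its real API:

```java
final class StageWiringSketch {
  record LinkedStage(String name, LinkedStage next) {}

  public static void main(String[] args) {
    LinkedStage complete = new LinkedStage("PutOperationStage", null);
    LinkedStage report = new LinkedStage("ReportResultStage", complete);
    LinkedStage execute = new LinkedStage("ExecuteActionStage", report);
    LinkedStage fetch = new LinkedStage("InputFetchStage", execute);
    LinkedStage match = new LinkedStage("MatchStage", fetch);
    // Walking forward from the entry stage visits them in execution order.
    for (LinkedStage s = match; s != null; s = s.next()) {
      System.out.println(s.name());
    }
  }
}
```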
(SHARD.equals(configs.getBackplane().getType())) { backplane = - new RedisShardBackplane(identifier, this::stripOperation, this::stripQueuedOperation); + new RedisShardBackplane( + identifier, + /* subscribeToBackplane= */ false, + /* runFailsafeOperation= */ false, + this::stripOperation, + this::stripQueuedOperation); backplane.start(configs.getWorker().getPublicName()); } else { throw new IllegalArgumentException("Shard Backplane not set in config"); @@ -602,14 +578,13 @@ public void start() throws ConfigurationException, InterruptedException, IOExcep remoteInputStreamFactory, removeDirectoryService, accessRecorder, storage); instance = - new ShardWorkerInstance( - configs.getWorker().getPublicName(), digestUtil, backplane, storage); + new WorkerInstance(configs.getWorker().getPublicName(), digestUtil, backplane, storage); // Create the appropriate writer for the context CasWriter writer; if (!configs.getWorker().getCapabilities().isCas()) { Retrier retrier = new Retrier(Backoff.sequential(5), Retrier.DEFAULT_IS_RETRIABLE); - writer = new RemoteCasWriter(backplane.getStorageWorkers(), workerStubs, retrier); + writer = new RemoteCasWriter(backplane, workerStubs, retrier); } else { writer = new LocalCasWriter(execFileSystem); } @@ -636,10 +611,12 @@ public void start() throws ConfigurationException, InterruptedException, IOExcep configs.getWorker().isOnlyMulticoreTests(), configs.getWorker().isAllowBringYourOwnContainer(), configs.getWorker().isErrorOperationRemainingResources(), + configs.getWorker().isErrorOperationOutputSizeExceeded(), + LocalResourceSetUtils.create(configs.getWorker().getResources()), writer); pipeline = new Pipeline(); - server = createServer(serverBuilder, storage, instance, pipeline, context); + server = createServer(serverBuilder, (CASFileCache) storage, instance, pipeline, context); removeWorker(configs.getWorker().getPublicName()); @@ -653,13 +630,7 @@ public void start() throws ConfigurationException, InterruptedException, IOExcep PrometheusPublisher.startHttpServer(configs.getPrometheusPort()); startFailsafeRegistration(); - // Listen for pipeline unhandled exceptions - ExecutorService pipelineExceptionExecutor = newSingleThreadExecutor(); - SettableFuture pipelineExceptionFuture = SettableFuture.create(); - pipelineExceptionFuture.addListener(this::exitPostPipelineFailure, pipelineExceptionExecutor); - - pipeline.start(pipelineExceptionFuture); - + pipeline.start(); healthCheckMetric.labels("start").inc(); executionSlotsTotal.set(configs.getWorker().getExecuteStageWidth()); inputFetchSlotsTotal.set(configs.getWorker().getInputFetchStageWidth()); @@ -667,9 +638,44 @@ public void start() throws ConfigurationException, InterruptedException, IOExcep log.log(INFO, String.format("%s initialized", identifier)); } - @PreDestroy - public void stop() throws InterruptedException { - System.err.println("*** shutting down gRPC server since JVM is shutting down"); + @Override + protected void onShutdown() throws InterruptedException { + initiateShutdown(); + awaitRelease(); + } + + private void awaitTermination() throws InterruptedException { + pipeline.join(); + server.awaitTermination(); + } + + public void initiateShutdown() { + if (pipeline != null) { + pipeline.stopMatchingOperations(); + } + if (server != null) { + server.shutdown(); + } + } + + private synchronized void awaitRelease() throws InterruptedException { + while (!released.get()) { + wait(); + } + } + + public synchronized void stop() throws InterruptedException { + try { + shutdown(); + } finally { + 
released.set(true); + notify(); + } + } + + private void shutdown() throws InterruptedException { + log.info("*** shutting down gRPC server since JVM is shutting down"); + prepareWorkerForGracefulShutdown(); PrometheusPublisher.stopHttpServer(); boolean interrupted = Thread.interrupted(); if (pipeline != null) { @@ -687,11 +693,12 @@ public void stop() throws InterruptedException { executionSlotsTotal.set(0); inputFetchSlotsTotal.set(0); if (execFileSystem != null) { - log.log(INFO, "Stopping exec filesystem"); + log.info("Stopping exec filesystem"); execFileSystem.stop(); + execFileSystem = null; } if (server != null) { - log.log(INFO, "Shutting down the server"); + log.info("Shutting down the server"); server.shutdown(); try { @@ -702,26 +709,28 @@ public void stop() throws InterruptedException { } finally { server.shutdownNow(); } + server = null; } if (backplane != null) { try { backplane.stop(); + backplane = null; } catch (InterruptedException e) { interrupted = true; } } if (workerStubs != null) { workerStubs.invalidateAll(); + workerStubs = null; } if (interrupted) { Thread.currentThread().interrupt(); throw new InterruptedException(); } - System.err.println("*** server shut down"); + log.info("*** server shut down"); } - @PostConstruct - public void init() throws OptionsParsingException { + public static void main(String[] args) throws Exception { // Only log severe log messages from Netty. Otherwise it logs warnings that look like this: // // 170714 08:16:28.552:WT 18 [io.grpc.netty.NettyServerHandler.onStreamError] Stream Error @@ -729,19 +738,17 @@ public void init() throws OptionsParsingException { // unknown stream 11369 nettyLogger.setLevel(SEVERE); + configs = BuildfarmConfigs.loadWorkerConfigs(args); + Worker worker = new Worker(); try { - start(); + worker.start(); + worker.awaitTermination(); } catch (IOException e) { - System.err.println("error: " + formatIOError(e)); + log.severe(formatIOError(e)); } catch (InterruptedException e) { - System.out.println("error: interrupted"); - } catch (ConfigurationException e) { - throw new RuntimeException(e); + log.log(Level.WARNING, "interrupted", e); + } finally { + worker.stop(); } } - - public static void main(String[] args) throws ConfigurationException { - configs = BuildfarmConfigs.loadWorkerConfigs(args); - SpringApplication.run(Worker.class, args); - } } diff --git a/src/main/java/build/buildfarm/worker/shard/ShardWorkerInstance.java b/src/main/java/build/buildfarm/worker/shard/WorkerInstance.java similarity index 96% rename from src/main/java/build/buildfarm/worker/shard/ShardWorkerInstance.java rename to src/main/java/build/buildfarm/worker/shard/WorkerInstance.java index 04c11d315b..e2af59aec3 100644 --- a/src/main/java/build/buildfarm/worker/shard/ShardWorkerInstance.java +++ b/src/main/java/build/buildfarm/worker/shard/WorkerInstance.java @@ -36,7 +36,7 @@ import build.buildfarm.common.Write; import build.buildfarm.common.grpc.UniformDelegateServerCallStreamObserver; import build.buildfarm.instance.MatchListener; -import build.buildfarm.instance.server.AbstractServerInstance; +import build.buildfarm.instance.server.NodeInstance; import build.buildfarm.operations.EnrichedOperation; import build.buildfarm.operations.FindOperationsResults; import build.buildfarm.v1test.BackplaneStatus; @@ -56,6 +56,7 @@ import io.grpc.Status; import io.grpc.Status.Code; import io.grpc.stub.ServerCallStreamObserver; +import io.prometheus.client.Counter; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; @@ 
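The released flag together with awaitRelease and stop above is a classic guarded-block handoff: the shutdown hook blocks in a wait loop until stop() flips the flag and notifies. A self-contained sketch of the pattern; this version uses notifyAll for safety, whereas the patch uses notify with a single known waiter:

```java
import java.util.concurrent.atomic.AtomicBoolean;

final class ReleaseLatchSketch {
  private final AtomicBoolean released = new AtomicBoolean(false);

  /** Blocks the caller (here, a shutdown hook) until release() is invoked. */
  synchronized void awaitRelease() throws InterruptedException {
    while (!released.get()) {
      wait(); // guarded wait: recheck the condition after every wakeup
    }
  }

  synchronized void release() {
    released.set(true);
    notifyAll(); // wake any waiter; the loop above re-verifies the flag
  }
}
```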
-67,10 +68,13 @@ import lombok.extern.java.Log; @Log -public class ShardWorkerInstance extends AbstractServerInstance { +public class WorkerInstance extends NodeInstance { + private static final Counter IO_METRIC = + Counter.build().name("io_bytes_read").help("Read I/O (bytes)").register(); + private final Backplane backplane; - public ShardWorkerInstance( + public WorkerInstance( String name, DigestUtil digestUtil, Backplane backplane, @@ -132,6 +136,7 @@ public void getBlob( @Override public void onNext(ByteString data) { blobObserver.onNext(data); + IO_METRIC.inc(data.size()); } void removeBlobLocation() { @@ -341,13 +346,12 @@ protected static ExecuteOperationMetadata expectExecuteOperationMetadata(Operati return null; } } else { - return AbstractServerInstance.expectExecuteOperationMetadata(operation); + return NodeInstance.expectExecuteOperationMetadata(operation); } } public Operation stripOperation(Operation operation) { - return operation - .toBuilder() + return operation.toBuilder() .setMetadata(Any.pack(expectExecuteOperationMetadata(operation))) .build(); } @@ -355,8 +359,7 @@ public Operation stripOperation(Operation operation) { public Operation stripQueuedOperation(Operation operation) { if (operation.getMetadata().is(QueuedOperationMetadata.class)) { operation = - operation - .toBuilder() + operation.toBuilder() .setMetadata(Any.pack(expectExecuteOperationMetadata(operation))) .build(); } diff --git a/src/main/java/build/buildfarm/worker/shard/WorkerProfileService.java b/src/main/java/build/buildfarm/worker/shard/WorkerProfileService.java index 6eee990426..f00c57cc3d 100644 --- a/src/main/java/build/buildfarm/worker/shard/WorkerProfileService.java +++ b/src/main/java/build/buildfarm/worker/shard/WorkerProfileService.java @@ -15,7 +15,6 @@ package build.buildfarm.worker.shard; import build.buildfarm.backplane.Backplane; -import build.buildfarm.cas.ContentAddressableStorage; import build.buildfarm.cas.cfc.CASFileCache; import build.buildfarm.v1test.OperationTimesBetweenStages; import build.buildfarm.v1test.StageInformation; @@ -24,67 +23,90 @@ import build.buildfarm.v1test.WorkerProfileGrpc; import build.buildfarm.v1test.WorkerProfileMessage; import build.buildfarm.v1test.WorkerProfileRequest; -import build.buildfarm.worker.ExecuteActionStage; -import build.buildfarm.worker.InputFetchStage; import build.buildfarm.worker.PipelineStage; import build.buildfarm.worker.PutOperationStage; import build.buildfarm.worker.PutOperationStage.OperationStageDurations; -import build.buildfarm.worker.WorkerContext; +import build.buildfarm.worker.SuperscalarPipelineStage; import io.grpc.stub.StreamObserver; import java.io.IOException; +import javax.annotation.Nullable; public class WorkerProfileService extends WorkerProfileGrpc.WorkerProfileImplBase { - private final CASFileCache storage; - private final InputFetchStage inputFetchStage; - private final ExecuteActionStage executeActionStage; - private final WorkerContext context; + private final @Nullable CASFileCache storage; + private final PipelineStage matchStage; + private final SuperscalarPipelineStage inputFetchStage; + private final SuperscalarPipelineStage executeActionStage; + private final PipelineStage reportResultStage; private final PutOperationStage completeStage; private final Backplane backplane; public WorkerProfileService( - ContentAddressableStorage storage, - PipelineStage inputFetchStage, - PipelineStage executeActionStage, - WorkerContext context, - PipelineStage completeStage, + @Nullable CASFileCache storage, + 
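The new io_bytes_read counter uses the Prometheus simpleclient builder API. The metric name and help string below come from the patch; the surrounding class and method are illustrative:

```java
import io.prometheus.client.Counter;

final class IoMetricSketch {
  private static final Counter IO_BYTES_READ =
      Counter.build().name("io_bytes_read").help("Read I/O (bytes)").register();

  // Called once per streamed chunk, mirroring the inc() in WorkerInstance's onNext.
  static void onChunk(byte[] data) {
    IO_BYTES_READ.inc(data.length);
  }
}
```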
PipelineStage matchStage, + SuperscalarPipelineStage inputFetchStage, + SuperscalarPipelineStage executeActionStage, + PipelineStage reportResultStage, + PutOperationStage completeStage, Backplane backplane) { - this.storage = (CASFileCache) storage; - this.inputFetchStage = (InputFetchStage) inputFetchStage; - this.executeActionStage = (ExecuteActionStage) executeActionStage; - this.context = context; + this.storage = storage; + this.matchStage = matchStage; + this.inputFetchStage = inputFetchStage; + this.executeActionStage = executeActionStage; + this.reportResultStage = reportResultStage; this.completeStage = (PutOperationStage) completeStage; this.backplane = backplane; } + private StageInformation unaryStageInformation(String name, @Nullable String operationName) { + StageInformation.Builder builder = + StageInformation.newBuilder().setName(name).setSlotsConfigured(1); + if (operationName != null) { + builder.setSlotsUsed(1).addOperationNames(operationName); + } + return builder.build(); + } + + private StageInformation superscalarStageInformation(SuperscalarPipelineStage stage) { + return StageInformation.newBuilder() + .setName(stage.getName()) + .setSlotsConfigured(stage.getWidth()) + .setSlotsUsed(stage.getSlotUsage()) + .addAllOperationNames(stage.getOperationNames()) + .build(); + } + @Override public void getWorkerProfile( WorkerProfileRequest request, StreamObserver responseObserver) { // get usage of CASFileCache - WorkerProfileMessage.Builder replyBuilder = - WorkerProfileMessage.newBuilder() - .setCasSize(storage.size()) - .setCasEntryCount(storage.entryCount()) - .setCasMaxSize(storage.maxSize()) - .setCasMaxEntrySize(storage.maxEntrySize()) - .setCasUnreferencedEntryCount(storage.unreferencedEntryCount()) - .setCasDirectoryEntryCount(storage.directoryStorageCount()) - .setCasEvictedEntryCount(storage.getEvictedCount()) - .setCasEvictedEntrySize(storage.getEvictedSize()); + WorkerProfileMessage.Builder replyBuilder = WorkerProfileMessage.newBuilder(); + + // FIXME deliver full local storage chain + if (storage != null) { + replyBuilder + .setCasSize(storage.size()) + .setCasEntryCount(storage.entryCount()) + .setCasMaxSize(storage.maxSize()) + .setCasMaxEntrySize(storage.maxEntrySize()) + .setCasUnreferencedEntryCount(storage.unreferencedEntryCount()) + .setCasDirectoryEntryCount(storage.directoryStorageCount()) + .setCasEvictedEntryCount(storage.getEvictedCount()) + .setCasEvictedEntrySize(storage.getEvictedSize()); + } // get slots configured and used of superscalar stages + // prefer reverse order to avoid double counting if possible + // these stats are not consistent across their sampling and will + // produce: slots that are not consistent with operations, operations + // in multiple stages even in reverse due to claim progress + // in short: this is for monitoring, not for guaranteed consistency checks + String reportResultOperation = reportResultStage.getOperationName(); + String matchOperation = matchStage.getOperationName(); replyBuilder - .addStages( - StageInformation.newBuilder() - .setName("InputFetchStage") - .setSlotsConfigured(context.getInputFetchStageWidth()) - .setSlotsUsed(inputFetchStage.getSlotUsage()) - .build()) - .addStages( - StageInformation.newBuilder() - .setName("ExecuteActionStage") - .setSlotsConfigured(context.getExecuteStageWidth()) - .setSlotsUsed(executeActionStage.getSlotUsage()) - .build()); + .addStages(unaryStageInformation(reportResultStage.getName(), reportResultOperation)) + 
.addStages(superscalarStageInformation(executeActionStage)) + .addStages(superscalarStageInformation(inputFetchStage)) + .addStages(unaryStageInformation(matchStage.getName(), matchOperation)); // get average time costs on each stage OperationStageDurations[] durations = completeStage.getAverageTimeCostPerStage(); diff --git a/src/main/java/build/buildfarm/worker/util/BUILD b/src/main/java/build/buildfarm/worker/util/BUILD new file mode 100644 index 0000000000..a92360d732 --- /dev/null +++ b/src/main/java/build/buildfarm/worker/util/BUILD @@ -0,0 +1,24 @@ +java_library( + name = "util", + srcs = glob(["*.java"]), + visibility = ["//visibility:public"], + deps = [ + "//persistentworkers/src/main/protobuf:worker_protocol_java_proto", + "//src/main/java/build/buildfarm/common", + "//src/main/java/build/buildfarm/instance", + "//src/main/java/build/buildfarm/instance/stub", + "//src/main/java/build/buildfarm/worker/resources", + "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", + "@maven//:com_google_code_gson_gson", + "@maven//:com_google_guava_guava", + "@maven//:com_google_protobuf_protobuf_java", + "@maven//:com_google_protobuf_protobuf_java_util", + "@maven//:commons_io_commons_io", + "@maven//:io_grpc_grpc_api", + "@maven//:io_grpc_grpc_context", + "@maven//:io_grpc_grpc_core", + "@maven//:io_grpc_grpc_netty", + "@maven//:io_grpc_grpc_protobuf", + "@maven//:io_grpc_grpc_stub", + ], +) diff --git a/src/main/java/build/buildfarm/worker/util/InputsIndexer.java b/src/main/java/build/buildfarm/worker/util/InputsIndexer.java new file mode 100644 index 0000000000..84497b04a0 --- /dev/null +++ b/src/main/java/build/buildfarm/worker/util/InputsIndexer.java @@ -0,0 +1,141 @@ +// Copyright 2023 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package build.buildfarm.worker.util; + +import build.bazel.remote.execution.v2.Digest; +import build.bazel.remote.execution.v2.Directory; +import build.bazel.remote.execution.v2.FileNode; +import build.bazel.remote.execution.v2.NodeProperty; +import build.buildfarm.common.ProxyDirectoriesIndex; +import build.buildfarm.v1test.Tree; +import com.google.common.collect.ImmutableMap; +import com.google.devtools.build.lib.worker.WorkerProtocol.Input; +import java.nio.file.FileSystem; +import java.nio.file.Path; +import java.util.Map; + +/** + * Organizes action Inputs into files, extracting their paths, and differentiates tool inputs (e.g. + * JavaBuilder, Scalac, etc.) + * + *
<p>
Indexes (and partitions) Inputs from an action's Merkle Tree. + */ +public class InputsIndexer { + // See: https://github.com/bazelbuild/bazel/issues/10091 + public static final String BAZEL_TOOL_INPUT_MARKER = "bazel_tool_input"; + + final Tree tree; + final Map proxyDirs; + + final FileSystem fs; + + final Path opRoot; + + ImmutableMap files = null; + ImmutableMap absPathInputs = null; + ImmutableMap toolInputs = null; + + public InputsIndexer(Tree tree, Path opRoot) { + this.tree = tree; + this.proxyDirs = new ProxyDirectoriesIndex(tree.getDirectoriesMap()); + this.opRoot = opRoot; + this.fs = opRoot.getFileSystem(); + } + + // https://stackoverflow.com/questions/22611919/why-do-i-get-providermismatchexception-when-i-try-to-relativize-a-path-agains + public Path pathTransform(final Path path) { + Path ret = fs.getPath(path.isAbsolute() ? fs.getSeparator() : ""); + for (final Path component : path) ret = ret.resolve(component.getFileName().toString()); + return ret; + } + + public ImmutableMap getAllInputs() { + if (absPathInputs == null) { + ImmutableMap relFiles = getAllFiles(); + ImmutableMap.Builder inputs = ImmutableMap.builder(); + + for (Map.Entry pf : relFiles.entrySet()) { + Path absPath = this.opRoot.resolve(pf.getKey()).normalize(); + inputs.put(absPath, inputFromFile(absPath, pf.getValue())); + } + absPathInputs = inputs.build(); + } + return absPathInputs; + } + + public ImmutableMap getToolInputs() { + if (toolInputs == null) { + ImmutableMap relFiles = getAllFiles(); + ImmutableMap.Builder inputs = ImmutableMap.builder(); + + for (Map.Entry pf : relFiles.entrySet()) { + FileNode fn = pf.getValue(); + if (isToolInput(fn)) { + Path absPath = this.opRoot.resolve(pf.getKey()); + inputs.put(absPath, inputFromFile(absPath, fn)); + } + } + toolInputs = inputs.build(); + } + return toolInputs; + } + + private ImmutableMap getAllFiles() { + if (files == null) { + ImmutableMap.Builder accumulator = ImmutableMap.builder(); + Directory rootDir = proxyDirs.get(tree.getRootDigest()); + + Path fsRelative = fs.getPath("."); + files = getFilesFromDir(fsRelative, rootDir, accumulator).build(); + } + return files; + } + + private Input inputFromFile(Path absPath, FileNode fileNode) { + return Input.newBuilder() + .setPath(absPath.toString()) + .setDigest(fileNode.getDigest().getHashBytes()) + .build(); + } + + private ImmutableMap.Builder getFilesFromDir( + Path dirPath, Directory dir, ImmutableMap.Builder acc) { + dir.getFilesList() + .forEach( + fileNode -> { + Path path = dirPath.resolve(fileNode.getName()).normalize(); + acc.put(path, fileNode); + }); + + // Recurse into subdirectories + dir.getDirectoriesList() + .forEach( + dirNode -> + getFilesFromDir( + dirPath.resolve(dirNode.getName()), + this.proxyDirs.get(dirNode.getDigest()), + acc)); + return acc; + } + + private static boolean isToolInput(FileNode fileNode) { + for (NodeProperty prop : fileNode.getNodeProperties().getPropertiesList()) { + if (prop.getName().equals(BAZEL_TOOL_INPUT_MARKER)) { + return true; + } + } + return false; + } +} diff --git a/src/main/protobuf/BUILD.bazel b/src/main/protobuf/BUILD.bazel index 9f2e044cce..5f21b2d4b0 100644 --- a/src/main/protobuf/BUILD.bazel +++ b/src/main/protobuf/BUILD.bazel @@ -1,6 +1,6 @@ -load("@rules_proto//proto:defs.bzl", "proto_library") +load("@grpc-java//:java_grpc_library.bzl", "java_grpc_library") load("@rules_java//java:defs.bzl", "java_proto_library") -load("@io_grpc_grpc_java//:java_grpc_library.bzl", "java_grpc_library") +load("@rules_proto//proto:defs.bzl", 
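InputsIndexer keys tool-input detection off a node property named bazel_tool_input attached to each FileNode. A sketch of producing and testing such a marker with the Remote Execution API protos; the file name is hypothetical:

```java
import build.bazel.remote.execution.v2.FileNode;
import build.bazel.remote.execution.v2.NodeProperties;
import build.bazel.remote.execution.v2.NodeProperty;

final class ToolInputMarkerSketch {
  static final String BAZEL_TOOL_INPUT_MARKER = "bazel_tool_input";

  public static void main(String[] args) {
    FileNode tool =
        FileNode.newBuilder()
            .setName("JavaBuilder_deploy.jar") // hypothetical tool file
            .setNodeProperties(
                NodeProperties.newBuilder()
                    .addProperties(
                        NodeProperty.newBuilder().setName(BAZEL_TOOL_INPUT_MARKER)))
            .build();
    // Same check InputsIndexer.isToolInput performs over the properties list.
    boolean isTool =
        tool.getNodeProperties().getPropertiesList().stream()
            .anyMatch(p -> p.getName().equals(BAZEL_TOOL_INPUT_MARKER));
    System.out.println(isTool); // true
  }
}
```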
"proto_library") package(default_visibility = ["//visibility:public"]) @@ -8,12 +8,12 @@ proto_library( name = "build_buildfarm_v1test_buildfarm_proto", srcs = ["build/buildfarm/v1test/buildfarm.proto"], deps = [ + "@com_google_googleapis//google/api:annotations_proto", + "@com_google_googleapis//google/longrunning:operations_proto", + "@com_google_googleapis//google/rpc:status_proto", "@com_google_protobuf//:duration_proto", "@com_google_protobuf//:timestamp_proto", - "@googleapis//:google_api_annotations_proto", - "@googleapis//:google_longrunning_operations_proto", - "@googleapis//:google_rpc_status_proto", - "@remote_apis//build/bazel/remote/execution/v2:remote_execution_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_proto", ], ) @@ -21,9 +21,9 @@ java_proto_library( name = "build_buildfarm_v1test_buildfarm_java_proto", deps = [ ":build_buildfarm_v1test_buildfarm_proto", - "@googleapis//:google_longrunning_operations_proto", - "@googleapis//:google_rpc_status_proto", - "@remote_apis//build/bazel/remote/execution/v2:remote_execution_proto", + "@com_google_googleapis//google/longrunning:operations_proto", + "@com_google_googleapis//google/rpc:status_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_proto", ], ) diff --git a/src/main/protobuf/build/buildfarm/v1test/buildfarm.proto b/src/main/protobuf/build/buildfarm/v1test/buildfarm.proto index 96fd74f739..a47f691438 100644 --- a/src/main/protobuf/build/buildfarm/v1test/buildfarm.proto +++ b/src/main/protobuf/build/buildfarm/v1test/buildfarm.proto @@ -274,10 +274,14 @@ message ShardWorker { int64 expire_at = 2; int32 worker_type = 3; + + int64 first_registered_at = 4; } message WorkerChange { - message Add {} + message Add { + google.protobuf.Timestamp effectiveAt = 1; + } message Remove { string source = 1; @@ -444,8 +448,13 @@ message BackplaneStatus { DispatchedOperationsStatus dispatched_operations = 9; + // Maintained for backward compatibility. 
repeated string active_workers = 4; + repeated string active_storage_workers = 12; + + repeated string active_execute_workers = 13; + int64 cas_lookup_size = 5; int64 action_cache_size = 6; @@ -610,6 +619,8 @@ message StageInformation { // number of slots used for this stage int32 slots_used = 3; + + repeated string operation_names = 4; } message WorkerProfileMessage { diff --git a/src/test/java/build/buildfarm/cas/BUILD b/src/test/java/build/buildfarm/cas/BUILD index ff7774e067..be43dc41db 100644 --- a/src/test/java/build/buildfarm/cas/BUILD +++ b/src/test/java/build/buildfarm/cas/BUILD @@ -1,10 +1,10 @@ -load("//:jvm_flags.bzl", "ensure_accurate_metadata") +load("//:jvm_flags.bzl", "add_opens_sun_nio_fs", "ensure_accurate_metadata") java_test( name = "tests", size = "small", srcs = glob(["**/*.java"]), - jvm_flags = ensure_accurate_metadata(), + jvm_flags = ensure_accurate_metadata() + add_opens_sun_nio_fs(), test_class = "build.buildfarm.AllTests", deps = [ "//src/main/java/build/buildfarm/cas", @@ -12,8 +12,9 @@ java_test( "//src/main/java/build/buildfarm/instance/stub", "//src/test/java/build/buildfarm:test_runner", "//src/test/java/build/buildfarm/common/grpc", - "@googleapis//:google_bytestream_bytestream_java_grpc", - "@googleapis//:google_bytestream_bytestream_java_proto", + "//third_party/remote-apis:build_bazel_remote_execution_v2_remote_execution_java_grpc", + "@com_google_googleapis//google/bytestream:bytestream_java_grpc", + "@com_google_googleapis//google/bytestream:bytestream_java_proto", "@maven//:com_google_guava_guava", "@maven//:com_google_jimfs_jimfs", "@maven//:com_google_protobuf_protobuf_java", @@ -21,9 +22,10 @@ java_test( "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_context", "@maven//:io_grpc_grpc_core", + "@maven//:io_grpc_grpc_inprocess", "@maven//:io_grpc_grpc_stub", + "@maven//:io_grpc_grpc_util", "@maven//:org_mockito_mockito_core", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_grpc", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) diff --git a/src/test/java/build/buildfarm/cas/GrpcCASTest.java b/src/test/java/build/buildfarm/cas/GrpcCASTest.java index 6db6fd59aa..ed84cc02d1 100644 --- a/src/test/java/build/buildfarm/cas/GrpcCASTest.java +++ b/src/test/java/build/buildfarm/cas/GrpcCASTest.java @@ -19,9 +19,11 @@ import static org.mockito.Mockito.any; import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.verifyNoMoreInteractions; import build.bazel.remote.execution.v2.Compressor; import build.bazel.remote.execution.v2.ContentAddressableStorageGrpc.ContentAddressableStorageImplBase; @@ -108,7 +110,7 @@ public void read(ReadRequest request, StreamObserver responseObser GrpcCAS cas = new GrpcCAS( instanceName, - /* readonly=*/ true, + /* readonly= */ true, InProcessChannelBuilder.forName(fakeServerName).directExecutor().build(), mock(ByteStreamUploader.class), onExpirations); @@ -138,7 +140,7 @@ public void read(ReadRequest request, StreamObserver responseObser GrpcCAS cas = new GrpcCAS( instanceName, - /* readonly=*/ true, + /* readonly= */ true, InProcessChannelBuilder.forName(fakeServerName).directExecutor().build(), 
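The new first_registered_at field surfaces through the generated ShardWorker builder; the setEndpoint, setWorkerType, and setFirstRegisteredAt calls below match the registration code earlier in this patch, while the wrapper method and its arguments are illustrative:

```java
import build.buildfarm.v1test.ShardWorker;

final class RegistrationSketch {
  static ShardWorker describe(String endpoint, int workerType, long firstRegisteredAtMillis) {
    return ShardWorker.newBuilder()
        .setEndpoint(endpoint)
        .setWorkerType(workerType)
        .setFirstRegisteredAt(firstRegisteredAtMillis) // new int64 field 4
        .build();
  }
}
```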
mock(ByteStreamUploader.class), onExpirations); @@ -156,13 +158,14 @@ public void putAddsExpiration() throws IOException, InterruptedException { MultimapBuilder.hashKeys().arrayListValues().build(); Channel channel = InProcessChannelBuilder.forName(fakeServerName).directExecutor().build(); ByteStreamUploader uploader = mock(ByteStreamUploader.class); - GrpcCAS cas = new GrpcCAS(instanceName, /* readonly=*/ false, channel, uploader, onExpirations); + GrpcCAS cas = + new GrpcCAS(instanceName, /* readonly= */ false, channel, uploader, onExpirations); Runnable onExpiration = mock(Runnable.class); cas.put(new Blob(uploadContent, digest), onExpiration); verify(uploader, times(1)) .uploadBlob(eq(HashCode.fromString(digest.getHash())), any(Chunker.class)); assertThat(onExpirations.get(digest)).containsExactly(onExpiration); - verifyZeroInteractions(onExpiration); + verifyNoInteractions(onExpiration); } @Test @@ -183,7 +186,7 @@ public void writeIsResumable() throws Exception { Channel channel = InProcessChannelBuilder.forName(fakeServerName).directExecutor().build(); GrpcCAS cas = new GrpcCAS( - instanceName, /* readonly=*/ false, channel, /* uploader=*/ null, onExpirations); + instanceName, /* readonly= */ false, channel, /* uploader= */ null, onExpirations); RequestMetadata requestMetadata = RequestMetadata.getDefaultInstance(); Write initialWrite = cas.getWrite(Compressor.Value.IDENTITY, digest, uuid, requestMetadata); try (OutputStream writeOut = initialWrite.getOutput(1, SECONDS, () -> {})) { @@ -205,9 +208,9 @@ public void writeIsNullForReadonly() throws Exception { GrpcCAS cas = new GrpcCAS( instanceName, - /* readonly=*/ true, - /* channel=*/ null, - /* uploader=*/ null, + /* readonly= */ true, + /* channel= */ null, + /* uploader= */ null, onExpirations); RequestMetadata requestMetadata = RequestMetadata.getDefaultInstance(); @@ -219,12 +222,14 @@ public void writeIsNullForReadonly() throws Exception { public void findMissingBlobsSwallowsFilteredList() throws Exception { Channel channel = InProcessChannelBuilder.forName(fakeServerName).directExecutor().build(); Runnable onExpiration = mock(Runnable.class); - GrpcCAS cas = new GrpcCAS("test", /* readonly=*/ false, channel, null, onExpirations); - ContentAddressableStorageImplBase casService = mock(ContentAddressableStorageImplBase.class); + GrpcCAS cas = new GrpcCAS("test", /* readonly= */ false, channel, null, onExpirations); + ContentAddressableStorageImplBase casService = spy(ContentAddressableStorageImplBase.class); serviceRegistry.addService(casService); + // Mutable calls bindService, and clearInvocations is undesirable + verify(casService, times(1)).bindService(); Digest emptyDigest = Digest.getDefaultInstance(); assertThat(cas.findMissingBlobs(ImmutableList.of(emptyDigest))).isEmpty(); - verifyZeroInteractions(casService); - verifyZeroInteractions(onExpiration); + verifyNoMoreInteractions(casService); + verifyNoInteractions(onExpiration); } } diff --git a/src/test/java/build/buildfarm/cas/MemoryWriteOutputStreamTest.java b/src/test/java/build/buildfarm/cas/MemoryWriteOutputStreamTest.java index 2a979d346e..545b0d0d2f 100644 --- a/src/test/java/build/buildfarm/cas/MemoryWriteOutputStreamTest.java +++ b/src/test/java/build/buildfarm/cas/MemoryWriteOutputStreamTest.java @@ -16,7 +16,7 @@ import static com.google.common.truth.Truth.assertThat; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.verifyNoInteractions; import 
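The Mockito changes in these tests follow the removal of verifyZeroInteractions in Mockito 3+: verifyNoInteractions is the direct replacement, and verifyNoMoreInteractions covers the spy whose bindService call was already verified. A minimal sketch of the replacement API:

```java
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verifyNoInteractions;

final class VerifyMigrationSketch {
  public static void main(String[] args) {
    Runnable onExpiration = mock(Runnable.class);
    // Previously verifyZeroInteractions(onExpiration), which Mockito 3+ removed.
    verifyNoInteractions(onExpiration); // passes: the mock was never touched
  }
}
```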
build.bazel.remote.execution.v2.Digest; import build.buildfarm.common.DigestUtil; @@ -45,6 +45,6 @@ public void asyncWriteCompletionIsComplete() throws IOException { writtenFuture.set(content); assertThat(write.isComplete()).isTrue(); assertThat(write.getCommittedSize()).isEqualTo(digest.getSizeBytes()); - verifyZeroInteractions(cas); + verifyNoInteractions(cas); } } diff --git a/src/test/java/build/buildfarm/cas/cfc/CASFileCacheTest.java b/src/test/java/build/buildfarm/cas/cfc/CASFileCacheTest.java index 25143bbc9d..e24c14262f 100644 --- a/src/test/java/build/buildfarm/cas/cfc/CASFileCacheTest.java +++ b/src/test/java/build/buildfarm/cas/cfc/CASFileCacheTest.java @@ -19,8 +19,11 @@ import static com.google.common.truth.Truth.assertThat; import static com.google.common.util.concurrent.MoreExecutors.directExecutor; import static com.google.common.util.concurrent.MoreExecutors.shutdownAndAwaitTermination; +import static java.lang.Thread.State.TERMINATED; +import static java.lang.Thread.State.WAITING; import static java.util.concurrent.Executors.newSingleThreadExecutor; import static java.util.concurrent.TimeUnit.MICROSECONDS; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.Assert.fail; import static org.mockito.Mockito.any; @@ -29,7 +32,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.verifyNoInteractions; import static org.mockito.Mockito.when; import build.bazel.remote.execution.v2.Compressor; @@ -43,7 +46,6 @@ import build.buildfarm.cas.DigestMismatchException; import build.buildfarm.cas.cfc.CASFileCache.CancellableOutputStream; import build.buildfarm.cas.cfc.CASFileCache.Entry; -import build.buildfarm.cas.cfc.CASFileCache.PutDirectoryException; import build.buildfarm.cas.cfc.CASFileCache.StartupCacheResults; import build.buildfarm.common.DigestUtil; import build.buildfarm.common.DigestUtil.HashFunction; @@ -59,6 +61,7 @@ import com.google.common.collect.Maps; import com.google.common.jimfs.Configuration; import com.google.common.jimfs.Jimfs; +import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.SettableFuture; @@ -78,6 +81,7 @@ import java.util.Map; import java.util.UUID; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; @@ -144,21 +148,20 @@ public void setUp() throws IOException, InterruptedException { fileCache = new CASFileCache( root, - /* maxSizeInBytes=*/ 1024, - /* maxEntrySizeInBytes=*/ 1024, - /* hexBucketLevels=*/ 1, + /* maxSizeInBytes= */ 1024, + /* maxEntrySizeInBytes= */ 1024, + /* hexBucketLevels= */ 1, storeFileDirsIndexInMemory, - /* publishTtlMetric=*/ false, - /* execRootFallback=*/ false, + /* execRootFallback= */ false, DIGEST_UTIL, expireService, - /* accessRecorder=*/ directExecutor(), + /* accessRecorder= */ directExecutor(), storage, - /* directoriesIndexDbName=*/ ":memory:", + /* directoriesIndexDbName= */ ":memory:", onPut, onExpire, delegate, - /* delegateSkipLoad=*/ false) { + /* delegateSkipLoad= */ false) { @Override protected InputStream newExternalInput( Compressor.Value compressor, 
Digest digest, long offset) throws IOException { @@ -176,10 +179,11 @@ protected InputStream newExternalInput( @After public void tearDown() throws IOException, InterruptedException { + FileStore fileStore = Files.getFileStore(root); // bazel appears to have a problem with us creating directories under // windows that are marked as no-delete. clean up after ourselves with // our utils - Directories.remove(root); + Directories.remove(root, fileStore); if (!shutdownAndAwaitTermination(putService, 1, SECONDS)) { throw new RuntimeException("could not shut down put service"); } @@ -203,12 +207,12 @@ public void putEmptyFileThrowsIllegalStateException() throws IOException, Interr ByteString blob = ByteString.copyFromUtf8(""); Digest blobDigest = DIGEST_UTIL.compute(blob); // supply an empty input stream if called for test clarity - when(mockInputStreamFactory.newInput(Compressor.Value.IDENTITY, blobDigest, /* offset=*/ 0)) + when(mockInputStreamFactory.newInput(Compressor.Value.IDENTITY, blobDigest, /* offset= */ 0)) .thenReturn(ByteString.EMPTY.newInput()); try { fileCache.put(blobDigest, false); } finally { - verifyZeroInteractions(mockInputStreamFactory); + verifyNoInteractions(mockInputStreamFactory); } } @@ -241,7 +245,8 @@ public void putDirectoryCreatesTree() throws IOException, InterruptedException { subdirDigest, subDirectory); Path dirPath = getInterruptiblyOrIOException( - fileCache.putDirectory(dirDigest, directoriesIndex, putService)); + fileCache.putDirectory(dirDigest, directoriesIndex, putService)) + .getPath(); assertThat(Files.isDirectory(dirPath)).isTrue(); assertThat(Files.exists(dirPath.resolve("file"))).isTrue(); assertThat(Files.isDirectory(dirPath.resolve("subdir"))).isTrue(); @@ -369,7 +374,7 @@ public void startSkipsLoadingExistingBlob() throws IOException, InterruptedExcep Files.write(execPath, blob.toByteArray()); EvenMoreFiles.setReadOnlyPerms(execPath, true, fileStore); - StartupCacheResults results = fileCache.start(/* skipLoad=*/ true); + StartupCacheResults results = fileCache.start(/* skipLoad= */ true); // check the startup results to ensure our two files were processed assertThat(results.load.loadSkipped).isTrue(); @@ -394,7 +399,7 @@ public void startRemovesInvalidEntries() throws IOException, InterruptedExceptio Files.write( invalidExec, validBlob.toByteArray()); // content would match but for invalid exec field - fileCache.start(/* skipLoad=*/ false); + fileCache.start(/* skipLoad= */ false); assertThat(!Files.exists(tooFewComponents)).isTrue(); assertThat(!Files.exists(tooManyComponents)).isTrue(); @@ -429,7 +434,7 @@ public void expireEntryWaitsForUnreferencedEntry() ByteString bigContent = ByteString.copyFrom(bigData); Digest bigDigest = DIGEST_UTIL.compute(bigContent); blobs.put(bigDigest, bigContent); - Path bigPath = fileCache.put(bigDigest, /* isExecutable=*/ false); + Path bigPath = fileCache.put(bigDigest, /* isExecutable= */ false); AtomicBoolean started = new AtomicBoolean(false); ExecutorService service = newSingleThreadExecutor(); @@ -440,7 +445,7 @@ public void expireEntryWaitsForUnreferencedEntry() ByteString content = ByteString.copyFromUtf8("CAS Would Exceed Max Size"); Digest digest = DIGEST_UTIL.compute(content); blobs.put(digest, content); - fileCache.put(digest, /* isExecutable=*/ false); + fileCache.put(digest, /* isExecutable= */ false); return null; }); while (!started.get()) { @@ -470,10 +475,10 @@ public void containsRecordsAccess() throws IOException, InterruptedException { Digest digestThree = DIGEST_UTIL.compute(contentThree); 
blobs.put(digestThree, contentThree); - String pathOne = fileCache.put(digestOne, /* isExecutable=*/ false).getFileName().toString(); - String pathTwo = fileCache.put(digestTwo, /* isExecutable=*/ false).getFileName().toString(); + String pathOne = fileCache.put(digestOne, /* isExecutable= */ false).getFileName().toString(); + String pathTwo = fileCache.put(digestTwo, /* isExecutable= */ false).getFileName().toString(); String pathThree = - fileCache.put(digestThree, /* isExecutable=*/ false).getFileName().toString(); + fileCache.put(digestThree, /* isExecutable= */ false).getFileName().toString(); fileCache.decrementReferences( ImmutableList.of(pathOne, pathTwo, pathThree), ImmutableList.of()); /* three -> two -> one */ @@ -494,8 +499,8 @@ public void mismatchedSizeIsNotContained() throws InterruptedException { fileCache.put(blob); Digest mismatchedDigest = digest.toBuilder().setSizeBytes(digest.getSizeBytes() + 1).build(); - assertThat(fileCache.contains(digest, /* result=*/ null)).isTrue(); - assertThat(fileCache.contains(mismatchedDigest, /* result=*/ null)).isFalse(); + assertThat(fileCache.contains(digest, /* result= */ null)).isTrue(); + assertThat(fileCache.contains(mismatchedDigest, /* result= */ null)).isFalse(); } @Test @@ -691,14 +696,14 @@ public void readRemovesNonexistentEntry() throws IOException, InterruptedExcepti Blob blob = new Blob(content, DIGEST_UTIL); fileCache.put(blob); - String key = fileCache.getKey(blob.getDigest(), /* isExecutable=*/ false); + String key = fileCache.getKey(blob.getDigest(), /* isExecutable= */ false); // putCreatesFile verifies this Files.delete(fileCache.getPath(key)); // update entry with expired deadline storage.get(key).existsDeadline = Deadline.after(0, SECONDS); try (InputStream in = - fileCache.newInput(Compressor.Value.IDENTITY, blob.getDigest(), /* offset=*/ 0)) { + fileCache.newInput(Compressor.Value.IDENTITY, blob.getDigest(), /* offset= */ 0)) { fail("should not get here"); } catch (NoSuchFileException e) { // success @@ -872,18 +877,18 @@ public void duplicateExpiredEntrySuppressesDigestExpiration() } blobs.put(expiringBlob.getDigest(), expiringBlob.getData()); decrementReference( - fileCache.put(expiringBlob.getDigest(), /* isExecutable=*/ false)); // expected eviction + fileCache.put(expiringBlob.getDigest(), /* isExecutable= */ false)); // expected eviction blobs.clear(); decrementReference( fileCache.put( expiringBlob.getDigest(), - /* isExecutable=*/ true)); // should be fed from storage directly, not through delegate + /* isExecutable= */ true)); // should be fed from storage directly, not through delegate fileCache.put(new Blob(ByteString.copyFromUtf8("Hello, World"), DIGEST_UTIL)); - verifyZeroInteractions(onExpire); + verifyNoInteractions(onExpire); // assert expiration of non-executable digest - String expiringKey = fileCache.getKey(expiringBlob.getDigest(), /* isExecutable=*/ false); + String expiringKey = fileCache.getKey(expiringBlob.getDigest(), /* isExecutable= */ false); assertThat(storage.containsKey(expiringKey)).isFalse(); assertThat(Files.exists(fileCache.getPath(expiringKey))).isFalse(); } @@ -1107,21 +1112,20 @@ public void copyExternalInputRetries() throws Exception { CASFileCache flakyExternalCAS = new CASFileCache( root, - /* maxSizeInBytes=*/ 1024, - /* maxEntrySizeInBytes=*/ 1024, - /* hexBucketLevels=*/ 1, + /* maxSizeInBytes= */ 1024, + /* maxEntrySizeInBytes= */ 1024, + /* hexBucketLevels= */ 1, storeFileDirsIndexInMemory, - /* publishTtlMetric=*/ false, - /* execRootFallback=*/ false, + /* 
execRootFallback= */ false, DIGEST_UTIL, expireService, - /* accessRecorder=*/ directExecutor(), + /* accessRecorder= */ directExecutor(), storage, - /* directoriesIndexDbName=*/ ":memory:", - /* onPut=*/ digest -> {}, - /* onExpire=*/ digests -> {}, - /* delegate=*/ null, - /* delegateSkipLoad=*/ false) { + /* directoriesIndexDbName= */ ":memory:", + /* onPut= */ digest -> {}, + /* onExpire= */ digests -> {}, + /* delegate= */ null, + /* delegateSkipLoad= */ false) { boolean throwUnavailable = true; @Override @@ -1171,21 +1175,20 @@ public void newInputThrowsNoSuchFileExceptionWithoutDelegate() throws Exception ContentAddressableStorage undelegatedCAS = new CASFileCache( root, - /* maxSizeInBytes=*/ 1024, - /* maxEntrySizeInBytes=*/ 1024, - /* hexBucketLevels=*/ 1, + /* maxSizeInBytes= */ 1024, + /* maxEntrySizeInBytes= */ 1024, + /* hexBucketLevels= */ 1, storeFileDirsIndexInMemory, - /* publishTtlMetric=*/ false, - /* execRootFallback=*/ false, + /* execRootFallback= */ false, DIGEST_UTIL, expireService, - /* accessRecorder=*/ directExecutor(), + /* accessRecorder= */ directExecutor(), storage, - /* directoriesIndexDbName=*/ ":memory:", - /* onPut=*/ digest -> {}, - /* onExpire=*/ digests -> {}, - /* delegate=*/ null, - /* delegateSkipLoad=*/ false) { + /* directoriesIndexDbName= */ ":memory:", + /* onPut= */ digest -> {}, + /* onExpire= */ digests -> {}, + /* delegate= */ null, + /* delegateSkipLoad= */ false) { @Override protected InputStream newExternalInput( Compressor.Value compressor, Digest digest, long offset) throws IOException { @@ -1201,7 +1204,7 @@ protected InputStream newExternalInput( Digest blobDigest = DIGEST_UTIL.compute(blob); NoSuchFileException expected = null; try (InputStream in = - undelegatedCAS.newInput(Compressor.Value.IDENTITY, blobDigest, /* offset=*/ 0)) { + undelegatedCAS.newInput(Compressor.Value.IDENTITY, blobDigest, /* offset= */ 0)) { fail("should not get here"); } catch (NoSuchFileException e) { expected = e; @@ -1209,6 +1212,116 @@ protected InputStream newExternalInput( assertThat(expected).isNotNull(); } + @Test + public void testConcurrentWrites() throws Exception { + ByteString blob = ByteString.copyFromUtf8("concurrent write"); + Digest digest = DIGEST_UTIL.compute(blob); + UUID uuid = UUID.randomUUID(); + // The same instance of Write will be passed to both the threads, so that the both threads + // try to get same output stream. + Write write = + fileCache.getWrite( + Compressor.Value.IDENTITY, digest, uuid, RequestMetadata.getDefaultInstance()); + + CyclicBarrier barrier = new CyclicBarrier(3); + + Thread write1 = + new Thread( + () -> { + try { + ConcurrentWriteStreamObserver writeStreamObserver = + new ConcurrentWriteStreamObserver(write); + writeStreamObserver.registerCallback(); + barrier.await(); // let both the threads get same write stream. + writeStreamObserver.ownStream(); // let other thread get the ownership of stream + writeStreamObserver.write(blob); + writeStreamObserver.close(); + } catch (Exception e) { + // do nothing + } + }, + "FirstRequest"); + Thread write2 = + new Thread( + () -> { + try { + ConcurrentWriteStreamObserver writeStreamObserver = + new ConcurrentWriteStreamObserver(write); + writeStreamObserver.registerCallback(); + writeStreamObserver.ownStream(); // this thread will get the ownership of stream + barrier.await(); // let both the threads get same write stream. 
+ while (write1.getState() != WAITING) + ; // wait for first request to go in wait state + writeStreamObserver.write(blob); + writeStreamObserver.close(); + } catch (Exception e) { + // do nothing + } + }, + "SecondRequest"); + write1.start(); + write2.start(); + barrier.await(); // let both the requests reach the critical section + + // Wait for each write operation to complete, allowing a maximum of 100ms per write. + // Note: A 100ms wait time allowed 1000 * 8 successful test runs. + // In certain scenario, even this wait time may not be enough and test still be called flaky. + // But setting wait time 0 may cause test to wait forever (if there is issue in code) and the + // build might fail with timeout error. + write1.join(100); + write2.join(100); + + assertThat(write1.getState()).isEqualTo(TERMINATED); + assertThat(write2.getState()).isEqualTo(TERMINATED); + } + + static class ConcurrentWriteStreamObserver { + Write write; + FeedbackOutputStream out; + + ConcurrentWriteStreamObserver(Write write) { + this.write = write; + } + + void registerCallback() { + Futures.addCallback( + write.getFuture(), + new FutureCallback() { + @Override + public void onSuccess(Long committedSize) { + commit(); + } + + @Override + public void onFailure(Throwable t) { + // do nothing + } + }, + directExecutor()); + } + + synchronized void ownStream() throws Exception { + this.out = write.getOutput(10, MILLISECONDS, () -> {}); + } + + /** + * Request 1 may invoke this method for request 2 or vice-versa via callback on + * write.getFuture(). Synchronization is necessary to prevent conflicts when this method is + * called simultaneously by different threads. + */ + synchronized void commit() { + // critical section + } + + void write(ByteString data) throws IOException { + data.writeTo(out); + } + + void close() throws IOException { + out.close(); + } + } + @RunWith(JUnit4.class) public static class NativeFileDirsIndexInMemoryCASFileCacheTest extends CASFileCacheTest { public NativeFileDirsIndexInMemoryCASFileCacheTest() throws IOException { @@ -1243,8 +1356,7 @@ public OsXFileDirsIndexInMemoryCASFileCacheTest() { super( Iterables.getFirst( Jimfs.newFileSystem( - Configuration.osX() - .toBuilder() + Configuration.osX().toBuilder() .setAttributeViews("basic", "owner", "posix", "unix") .build()) .getRootDirectories(), @@ -1259,8 +1371,7 @@ public OsXFileDirsIndexInSqliteCASFileCacheTest() { super( Iterables.getFirst( Jimfs.newFileSystem( - Configuration.osX() - .toBuilder() + Configuration.osX().toBuilder() .setAttributeViews("basic", "owner", "posix", "unix") .build()) .getRootDirectories(), @@ -1275,8 +1386,7 @@ public UnixFileDirsIndexInMemoryCASFileCacheTest() { super( Iterables.getFirst( Jimfs.newFileSystem( - Configuration.unix() - .toBuilder() + Configuration.unix().toBuilder() .setAttributeViews("basic", "owner", "posix", "unix") .build()) .getRootDirectories(), @@ -1291,8 +1401,7 @@ public UnixFileDirsIndexInSqliteCASFileCacheTest() { super( Iterables.getFirst( Jimfs.newFileSystem( - Configuration.unix() - .toBuilder() + Configuration.unix().toBuilder() .setAttributeViews("basic", "owner", "posix", "unix") .build()) .getRootDirectories(), @@ -1307,8 +1416,7 @@ public WindowsFileDirsIndexInMemoryCASFileCacheTest() { super( Iterables.getFirst( Jimfs.newFileSystem( - Configuration.windows() - .toBuilder() + Configuration.windows().toBuilder() .setAttributeViews("basic", "owner", "dos", "acl", "posix", "user") .build()) .getRootDirectories(), @@ -1323,8 +1431,7 @@ public 
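testConcurrentWrites lines up its three parties (two writer threads plus the test thread) with a CyclicBarrier so both writers reach the critical section holding the same Write. A reduced sketch of that rendezvous, with thread names borrowed from the test:

```java
import java.util.concurrent.CyclicBarrier;

final class BarrierSketch {
  public static void main(String[] args) throws Exception {
    CyclicBarrier barrier = new CyclicBarrier(3); // two writers + the main thread

    Runnable writer =
        () -> {
          try {
            barrier.await(); // block until all three parties arrive
            System.out.println(Thread.currentThread().getName() + " entered critical section");
          } catch (Exception e) {
            Thread.currentThread().interrupt();
          }
        };

    new Thread(writer, "FirstRequest").start();
    new Thread(writer, "SecondRequest").start();
    barrier.await(); // arriving as the third party releases both writers at once
  }
}
```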
WindowsFileDirsIndexInSqliteCASFileCacheTest() { super( Iterables.getFirst( Jimfs.newFileSystem( - Configuration.windows() - .toBuilder() + Configuration.windows().toBuilder() .setAttributeViews("basic", "owner", "dos", "acl", "posix", "user") .build()) .getRootDirectories(), diff --git a/src/test/java/build/buildfarm/cas/cfc/DirectoriesIndexTest.java b/src/test/java/build/buildfarm/cas/cfc/DirectoriesIndexTest.java index 3eec66def7..3a2a2bba0d 100644 --- a/src/test/java/build/buildfarm/cas/cfc/DirectoriesIndexTest.java +++ b/src/test/java/build/buildfarm/cas/cfc/DirectoriesIndexTest.java @@ -40,7 +40,7 @@ public class DirectoriesIndexTest { private final DirectoriesIndex directoriesIndex; protected DirectoriesIndexTest(Path root, DirectoriesIndexType type) { - entryPathStrategy = new HexBucketEntryPathStrategy(root, /*levels=*/ 0); + entryPathStrategy = new HexBucketEntryPathStrategy(root, /* levels= */ 0); if (type == DirectoriesIndexType.Sqlite) { String jdbcIndexUrl = "jdbc:sqlite::memory:"; directoriesIndex = new SqliteFileDirectoriesIndex(jdbcIndexUrl, entryPathStrategy); @@ -112,8 +112,7 @@ public WindowsSqliteDirectoriesIndexTest() { super( Iterables.getFirst( Jimfs.newFileSystem( - Configuration.windows() - .toBuilder() + Configuration.windows().toBuilder() .setAttributeViews("basic", "owner", "dos", "acl", "posix", "user") .build()) .getRootDirectories(), @@ -128,8 +127,7 @@ public UnixSqliteDirectoriesIndexTest() { super( Iterables.getFirst( Jimfs.newFileSystem( - Configuration.unix() - .toBuilder() + Configuration.unix().toBuilder() .setAttributeViews("basic", "owner", "posix", "unix") .build()) .getRootDirectories(), @@ -144,8 +142,7 @@ public OsSqliteDirectoriesIndexTest() { super( Iterables.getFirst( Jimfs.newFileSystem( - Configuration.osX() - .toBuilder() + Configuration.osX().toBuilder() .setAttributeViews("basic", "owner", "posix", "unix") .build()) .getRootDirectories(), @@ -161,8 +158,7 @@ public WindowsMemoryFileDirectoriesIndexTest() { super( Iterables.getFirst( Jimfs.newFileSystem( - Configuration.windows() - .toBuilder() + Configuration.windows().toBuilder() .setAttributeViews("basic", "owner", "dos", "acl", "posix", "user") .build()) .getRootDirectories(), @@ -177,8 +173,7 @@ public UnixMemoryFileDirectoriesIndexTest() { super( Iterables.getFirst( Jimfs.newFileSystem( - Configuration.unix() - .toBuilder() + Configuration.unix().toBuilder() .setAttributeViews("basic", "owner", "posix", "unix") .build()) .getRootDirectories(), @@ -193,8 +188,7 @@ public OsMemoryFileDirectoriesIndexTest() { super( Iterables.getFirst( Jimfs.newFileSystem( - Configuration.osX() - .toBuilder() + Configuration.osX().toBuilder() .setAttributeViews("basic", "owner", "posix", "unix") .build()) .getRootDirectories(), diff --git a/src/test/java/build/buildfarm/common/BUILD b/src/test/java/build/buildfarm/common/BUILD index bda4f4243f..b1b996440f 100644 --- a/src/test/java/build/buildfarm/common/BUILD +++ b/src/test/java/build/buildfarm/common/BUILD @@ -1,18 +1,17 @@ -load("//:jvm_flags.bzl", "ensure_accurate_metadata") +load("//:jvm_flags.bzl", "add_opens_sun_nio_fs", "ensure_accurate_metadata") java_test( name = "tests", size = "small", srcs = glob(["*.java"]), - jvm_flags = ensure_accurate_metadata(), + jvm_flags = ensure_accurate_metadata() + add_opens_sun_nio_fs(), test_class = "build.buildfarm.AllTests", deps = [ "//src/main/java/build/buildfarm/common", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", "//src/test/java/build/buildfarm:test_runner", - 
"//third_party/jedis", - "@googleapis//:google_bytestream_bytestream_java_proto", - "@googleapis//:google_rpc_error_details_java_proto", + "@com_google_googleapis//google/bytestream:bytestream_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_google_guava_guava", "@maven//:com_google_jimfs_jimfs", "@maven//:com_google_protobuf_protobuf_java", @@ -23,5 +22,6 @@ java_test( "@maven//:io_grpc_grpc_stub", "@maven//:io_grpc_grpc_testing", "@maven//:org_mockito_mockito_core", + "@maven//:redis_clients_jedis", ], ) diff --git a/src/test/java/build/buildfarm/common/DigestUtilTest.java b/src/test/java/build/buildfarm/common/DigestUtilTest.java index 8d581fc84e..9427c36c68 100644 --- a/src/test/java/build/buildfarm/common/DigestUtilTest.java +++ b/src/test/java/build/buildfarm/common/DigestUtilTest.java @@ -15,28 +15,29 @@ package build.buildfarm.common; import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; import build.bazel.remote.execution.v2.Digest; import build.bazel.remote.execution.v2.DigestFunction; import build.buildfarm.common.DigestUtil.HashFunction; import com.google.protobuf.ByteString; import java.io.IOException; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; @RunWith(JUnit4.class) public class DigestUtilTest { - @Rule public final ExpectedException exception = ExpectedException.none(); - @Test public void buildThrowsOnInvalidHashCode() { DigestUtil digestUtil = new DigestUtil(HashFunction.MD5); - exception.expect(NumberFormatException.class); - exception.expectMessage("[foo] is not a valid MD5 hash."); - digestUtil.build("foo", 3); + NumberFormatException expected = + assertThrows( + NumberFormatException.class, + () -> { + digestUtil.build("foo", 3); + }); + assertThat(expected.getMessage()).isEqualTo("[foo] is not a valid MD5 hash."); } @Test diff --git a/src/test/java/build/buildfarm/common/config/BUILD b/src/test/java/build/buildfarm/common/config/BUILD new file mode 100644 index 0000000000..1c411712fa --- /dev/null +++ b/src/test/java/build/buildfarm/common/config/BUILD @@ -0,0 +1,14 @@ +java_test( + name = "tests", + srcs = glob(["*Test.java"]), + test_class = "build.buildfarm.AllTests", + deps = [ + "//src/main/java/build/buildfarm/common/config", + "//src/test/java/build/buildfarm:test_runner", + "@maven//:com_google_truth_truth", + "@maven//:io_grpc_grpc_api", + "@maven//:io_grpc_grpc_testing", + "@maven//:me_dinowernli_java_grpc_prometheus", + "@maven//:org_mockito_mockito_core", + ], +) diff --git a/src/test/java/build/buildfarm/common/config/BackplaneTest.java b/src/test/java/build/buildfarm/common/config/BackplaneTest.java new file mode 100644 index 0000000000..703f967e6c --- /dev/null +++ b/src/test/java/build/buildfarm/common/config/BackplaneTest.java @@ -0,0 +1,54 @@ +// Copyright 2023 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package build.buildfarm.common.config; + +import static com.google.common.truth.Truth.assertThat; + +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * @class BackplaneTest + * @brief Tests utility functions for Backplane configuration overrides + */ +@RunWith(JUnit4.class) +public class BackplaneTest { + @Before + public void assertNoEnvVariable() { + // If a REDIS_PASSWORD env variable is set, it wins. We're not mocking env vars. + assertThat(System.getenv("REDIS_PASSWORD")).isNull(); + } + + @Test + public void testRedisPasswordFromUri() { + Backplane b = new Backplane(); + String testRedisUri = "redis://user:pass1@redisHost.redisDomain"; + b.setRedisUri(testRedisUri); + assertThat(b.getRedisPassword()).isEqualTo("pass1"); + } + + /** + * Validate that the redis URI password is higher priority than the `redisPassword` in the config + */ + @Test + public void testRedisPasswordPriorities() { + Backplane b = new Backplane(); + b.setRedisUri("redis://user:pass1@redisHost.redisDomain"); + b.setRedisPassword("pass2"); + assertThat(b.getRedisPassword()).isEqualTo("pass1"); + } +} diff --git a/src/test/java/build/buildfarm/common/config/GrpcMetricsTest.java b/src/test/java/build/buildfarm/common/config/GrpcMetricsTest.java new file mode 100644 index 0000000000..547f81b2ca --- /dev/null +++ b/src/test/java/build/buildfarm/common/config/GrpcMetricsTest.java @@ -0,0 +1,36 @@ +package build.buildfarm.common.config; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +import io.grpc.ServerBuilder; +import me.dinowernli.grpc.prometheus.MonitoringServerInterceptor; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class GrpcMetricsTest { + @Mock private ServerBuilder serverBuilder; + private final GrpcMetrics grpcMetrics = new GrpcMetrics(); + + @Test + public void testHandleGrpcMetricIntercepts_disabled() { + grpcMetrics.setEnabled(false); + + GrpcMetrics.handleGrpcMetricIntercepts(serverBuilder, grpcMetrics); + verify(serverBuilder, never()).intercept(any(MonitoringServerInterceptor.class)); + } + + @Test + public void testHandleGrpcMetricIntercepts_withLatencyBucket() { + grpcMetrics.setEnabled(true); + grpcMetrics.setProvideLatencyHistograms(true); + grpcMetrics.setLatencyBuckets(new double[] {1, 2, 3}); + GrpcMetrics.handleGrpcMetricIntercepts(serverBuilder, grpcMetrics); + verify(serverBuilder, times(1)).intercept(any(MonitoringServerInterceptor.class)); + } +} diff --git a/src/test/java/build/buildfarm/common/grpc/BUILD b/src/test/java/build/buildfarm/common/grpc/BUILD index d470ee243a..b4f87c088c 100644 --- a/src/test/java/build/buildfarm/common/grpc/BUILD +++ b/src/test/java/build/buildfarm/common/grpc/BUILD @@ -4,8 +4,8 @@ java_library( visibility = ["//src/test/java/build/buildfarm:__subpackages__"], deps = [ "//src/main/java/build/buildfarm/common/grpc", - "@googleapis//:google_bytestream_bytestream_java_grpc", - "@googleapis//:google_bytestream_bytestream_java_proto", + "@com_google_googleapis//google/bytestream:bytestream_java_grpc", + "@com_google_googleapis//google/bytestream:bytestream_java_proto", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", "@maven//:io_grpc_grpc_api", @@ -21,14 +21,17 @@ java_test( deps = [ 
"//src/main/java/build/buildfarm/common/grpc", "//src/test/java/build/buildfarm:test_runner", - "@googleapis//:google_bytestream_bytestream_java_grpc", - "@googleapis//:google_bytestream_bytestream_java_proto", + "@com_google_googleapis//google/bytestream:bytestream_java_grpc", + "@com_google_googleapis//google/bytestream:bytestream_java_proto", + "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", "@maven//:com_google_truth_truth", "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_core", + "@maven//:io_grpc_grpc_inprocess", "@maven//:io_grpc_grpc_stub", "@maven//:io_grpc_grpc_testing", + "@maven//:io_grpc_grpc_util", "@maven//:org_mockito_mockito_core", ], ) diff --git a/src/test/java/build/buildfarm/common/grpc/ByteStreamHelperTest.java b/src/test/java/build/buildfarm/common/grpc/ByteStreamHelperTest.java index 14b4789ef3..b262217f7a 100644 --- a/src/test/java/build/buildfarm/common/grpc/ByteStreamHelperTest.java +++ b/src/test/java/build/buildfarm/common/grpc/ByteStreamHelperTest.java @@ -20,7 +20,6 @@ import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.eq; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -43,17 +42,20 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; +import org.mockito.MockitoAnnotations; +import org.mockito.Spy; @RunWith(JUnit4.class) public class ByteStreamHelperTest { @Rule public final GrpcCleanupRule grpcCleanup = new GrpcCleanupRule(); - private final ByteStreamImplBase serviceImpl = mock(ByteStreamImplBase.class); + @Spy private ByteStreamImplBase serviceImpl; private Channel channel; @Before public void setUp() throws Exception { + MockitoAnnotations.initMocks(this); String serverName = InProcessServerBuilder.generateName(); grpcCleanup @@ -85,11 +87,11 @@ public void newInputThrowsOnNotFound() { try (InputStream in = ByteStreamHelper.newInput( resourceName, - /* offset=*/ 0, + /* offset= */ 0, Suppliers.ofInstance(ByteStreamGrpc.newStub(channel)), NO_RETRIES::newBackoff, NO_RETRIES::isRetriable, - /* retryService=*/ null)) { + /* retryService= */ null)) { fail("should not get here"); } catch (IOException e) { assertThat(e).isInstanceOf(NoSuchFileException.class); diff --git a/src/test/java/build/buildfarm/common/grpc/ByteStreamServiceWriter.java b/src/test/java/build/buildfarm/common/grpc/ByteStreamServiceWriter.java index ebcfe68044..bdfd2e1a72 100644 --- a/src/test/java/build/buildfarm/common/grpc/ByteStreamServiceWriter.java +++ b/src/test/java/build/buildfarm/common/grpc/ByteStreamServiceWriter.java @@ -34,7 +34,7 @@ public class ByteStreamServiceWriter extends ByteStreamImplBase { private final ByteString.Output out; public ByteStreamServiceWriter(String resourceName, SettableFuture content) { - this(resourceName, content, /* expectedSize=*/ 0); + this(resourceName, content, /* expectedSize= */ 0); } public ByteStreamServiceWriter( diff --git a/src/test/java/build/buildfarm/common/grpc/StubWriteOutputStreamTest.java b/src/test/java/build/buildfarm/common/grpc/StubWriteOutputStreamTest.java index 5219e9d39d..4e176eba7a 100644 --- a/src/test/java/build/buildfarm/common/grpc/StubWriteOutputStreamTest.java +++ b/src/test/java/build/buildfarm/common/grpc/StubWriteOutputStreamTest.java @@ -17,10 +17,6 @@ import static com.google.common.truth.Truth.assertThat; import static java.util.concurrent.TimeUnit.MICROSECONDS; import static 
java.util.concurrent.TimeUnit.SECONDS; -import static org.mockito.AdditionalAnswers.delegatesTo; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -40,6 +36,7 @@ import io.grpc.inprocess.InProcessServerBuilder; import io.grpc.stub.StreamObserver; import io.grpc.testing.GrpcCleanupRule; +import io.grpc.util.MutableHandlerRegistry; import java.io.IOException; import java.io.OutputStream; import java.util.List; @@ -49,37 +46,27 @@ import org.junit.runner.RunWith; import org.junit.runners.JUnit4; import org.mockito.ArgumentCaptor; +import org.mockito.MockitoAnnotations; @RunWith(JUnit4.class) public class StubWriteOutputStreamTest { @Rule public final GrpcCleanupRule grpcCleanup = new GrpcCleanupRule(); - @SuppressWarnings("unchecked") - private final StreamObserver writeObserver = mock(StreamObserver.class); - - private final ByteStreamImplBase serviceImpl = - mock( - ByteStreamImplBase.class, - delegatesTo( - new ByteStreamImplBase() { - @Override - public StreamObserver write( - StreamObserver responseObserver) { - return writeObserver; - } - })); + private final MutableHandlerRegistry serviceRegistry = new MutableHandlerRegistry(); private Channel channel; @Before public void setUp() throws Exception { + MockitoAnnotations.initMocks(this); + String serverName = InProcessServerBuilder.generateName(); grpcCleanup .register( InProcessServerBuilder.forName(serverName) + .fallbackHandlerRegistry(serviceRegistry) .directExecutor() - .addService(serviceImpl) .build()) .start(); @@ -91,28 +78,22 @@ public void setUp() throws Exception { @Test public void resetExceptionsAreInterpreted() { String unimplementedResourceName = "unimplemented-resource"; - QueryWriteStatusRequest unimplementedRequest = - QueryWriteStatusRequest.newBuilder().setResourceName(unimplementedResourceName).build(); - doAnswer( - invocation -> { - StreamObserver observer = invocation.getArgument(1); - observer.onError(Status.UNIMPLEMENTED.asException()); - return null; - }) - .when(serviceImpl) - .queryWriteStatus(eq(unimplementedRequest), any(StreamObserver.class)); - String notFoundResourceName = "not-found-resource"; - QueryWriteStatusRequest notFoundRequest = - QueryWriteStatusRequest.newBuilder().setResourceName(notFoundResourceName).build(); - doAnswer( - invocation -> { - StreamObserver observer = invocation.getArgument(1); - observer.onError(Status.NOT_FOUND.asException()); - return null; - }) - .when(serviceImpl) - .queryWriteStatus(eq(notFoundRequest), any(StreamObserver.class)); + serviceRegistry.addService( + new ByteStreamImplBase() { + @Override + public void queryWriteStatus( + QueryWriteStatusRequest request, + StreamObserver responseObserver) { + if (request.getResourceName().equals(unimplementedResourceName)) { + responseObserver.onError(Status.UNIMPLEMENTED.asException()); + } else if (request.getResourceName().equals(notFoundResourceName)) { + responseObserver.onError(Status.NOT_FOUND.asException()); + } else { + responseObserver.onError(Status.INVALID_ARGUMENT.asException()); + } + } + }); StubWriteOutputStream write = new StubWriteOutputStream( @@ -120,11 +101,9 @@ public void resetExceptionsAreInterpreted() { Suppliers.ofInstance(ByteStreamGrpc.newStub(channel)), unimplementedResourceName, Functions.identity(), - /* expectedSize=*/ StubWriteOutputStream.UNLIMITED_EXPECTED_SIZE, - /* autoflush=*/ true); + /* 
expectedSize= */ StubWriteOutputStream.UNLIMITED_EXPECTED_SIZE, + /* autoflush= */ true); assertThat(write.getCommittedSize()).isEqualTo(0); - verify(serviceImpl, times(1)) - .queryWriteStatus(eq(unimplementedRequest), any(StreamObserver.class)); write = new StubWriteOutputStream( @@ -132,15 +111,23 @@ public void resetExceptionsAreInterpreted() { Suppliers.ofInstance(ByteStreamGrpc.newStub(channel)), notFoundResourceName, Functions.identity(), - /* expectedSize=*/ StubWriteOutputStream.UNLIMITED_EXPECTED_SIZE, - /* autoflush=*/ true); + /* expectedSize= */ StubWriteOutputStream.UNLIMITED_EXPECTED_SIZE, + /* autoflush= */ true); assertThat(write.getCommittedSize()).isEqualTo(0); - verify(serviceImpl, times(1)).queryWriteStatus(eq(notFoundRequest), any(StreamObserver.class)); } @SuppressWarnings("unchecked") @Test public void resetIsRespectedOnSubsequentWrite() throws IOException { + StreamObserver writeObserver = mock(StreamObserver.class); + serviceRegistry.addService( + new ByteStreamImplBase() { + @Override + public StreamObserver write( + StreamObserver responseObserver) { + return writeObserver; + } + }); String resourceName = "reset-resource"; StubWriteOutputStream write = new StubWriteOutputStream( @@ -148,15 +135,14 @@ public void resetIsRespectedOnSubsequentWrite() throws IOException { Suppliers.ofInstance(ByteStreamGrpc.newStub(channel)), resourceName, Functions.identity(), - /* expectedSize=*/ StubWriteOutputStream.UNLIMITED_EXPECTED_SIZE, - /* autoflush=*/ true); + /* expectedSize= */ StubWriteOutputStream.UNLIMITED_EXPECTED_SIZE, + /* autoflush= */ true); ByteString content = ByteString.copyFromUtf8("Hello, World"); try (OutputStream out = write.getOutput(1, SECONDS, () -> {})) { content.writeTo(out); write.reset(); content.writeTo(out); } - verify(serviceImpl, times(1)).write(any(StreamObserver.class)); ArgumentCaptor writeRequestCaptor = ArgumentCaptor.forClass(WriteRequest.class); verify(writeObserver, times(3)).onNext(writeRequestCaptor.capture()); List requests = writeRequestCaptor.getAllValues(); @@ -173,8 +159,8 @@ public void getOutputCallback() throws IOException { Suppliers.ofInstance(ByteStreamGrpc.newStub(channel)), resourceName, Functions.identity(), - /* expectedSize=*/ StubWriteOutputStream.UNLIMITED_EXPECTED_SIZE, - /* autoflush=*/ true); + /* expectedSize= */ StubWriteOutputStream.UNLIMITED_EXPECTED_SIZE, + /* autoflush= */ true); boolean callbackTimedOut = false; try (OutputStream out = diff --git a/src/test/java/build/buildfarm/common/io/BUILD b/src/test/java/build/buildfarm/common/io/BUILD index 551b96ea58..a002ee4923 100644 --- a/src/test/java/build/buildfarm/common/io/BUILD +++ b/src/test/java/build/buildfarm/common/io/BUILD @@ -6,8 +6,9 @@ java_test( "//src/main/java/build/buildfarm/common", "//src/main/java/build/buildfarm/common/grpc", "//src/test/java/build/buildfarm:test_runner", - "@googleapis//:google_bytestream_bytestream_java_grpc", - "@googleapis//:google_bytestream_bytestream_java_proto", + "@com_google_googleapis//google/bytestream:bytestream_java_grpc", + "@com_google_googleapis//google/bytestream:bytestream_java_proto", + "@maven//:com_google_guava_guava", "@maven//:com_google_jimfs_jimfs", "@maven//:com_google_protobuf_protobuf_java", "@maven//:com_google_truth_truth", diff --git a/src/test/java/build/buildfarm/common/io/DirectoriesTest.java b/src/test/java/build/buildfarm/common/io/DirectoriesTest.java index 1c7e55ba63..e9c707b124 100644 --- a/src/test/java/build/buildfarm/common/io/DirectoriesTest.java +++ 
b/src/test/java/build/buildfarm/common/io/DirectoriesTest.java @@ -22,26 +22,35 @@ import com.google.common.jimfs.Jimfs; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.nio.file.FileStore; import java.nio.file.Files; import java.nio.file.Path; import org.junit.After; +import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; class DirectoriesTest { protected final Path root; + protected FileStore fileStore; protected DirectoriesTest(Path root) { this.root = root; } + @Before + public void setUp() throws IOException { + fileStore = Files.getFileStore(root); + } + @After public void tearDown() throws IOException { // restore write permissions if (Files.exists(root)) { - Directories.enableAllWriteAccess(root); + Directories.enableAllWriteAccess(root, fileStore); } + fileStore = null; } @Test @@ -56,7 +65,7 @@ public void removeDirectoryDeletesTree() throws IOException { ImmutableList.of("A file in a subdirectory"), StandardCharsets.UTF_8); - Directories.remove(tree); + Directories.remove(tree, fileStore); assertThat(Files.exists(tree)).isFalse(); } @@ -75,11 +84,11 @@ public void changePermissionsForDelete() throws IOException { StandardCharsets.UTF_8); // remove write permissions - Directories.disableAllWriteAccess(tree); + Directories.disableAllWriteAccess(tree, fileStore); // directories are able to be removed, because the algorithm // changes the write permissions before performing the delete. - Directories.remove(tree); + Directories.remove(tree, fileStore); assertThat(Files.exists(tree)).isFalse(); } @@ -114,7 +123,7 @@ public void checkWriteDisabled() throws IOException { assertThat(Files.isWritable(subdir)).isTrue(); // remove write permissions - Directories.disableAllWriteAccess(tree); + Directories.disableAllWriteAccess(tree, fileStore); // check that write conditions have changed // If the unit tests were run as root, @@ -133,8 +142,7 @@ public OsXDirectoriesTest() { super( Iterables.getFirst( Jimfs.newFileSystem( - Configuration.osX() - .toBuilder() + Configuration.osX().toBuilder() .setAttributeViews("basic", "owner", "posix", "unix") .build()) .getRootDirectories(), @@ -148,8 +156,7 @@ public UnixDirectoriesTest() { super( Iterables.getFirst( Jimfs.newFileSystem( - Configuration.unix() - .toBuilder() + Configuration.unix().toBuilder() .setAttributeViews("basic", "owner", "posix", "unix") .build()) .getRootDirectories(), @@ -163,8 +170,7 @@ public WindowsDirectoriesTest() { super( Iterables.getFirst( Jimfs.newFileSystem( - Configuration.windows() - .toBuilder() + Configuration.windows().toBuilder() .setAttributeViews("basic", "owner", "dos", "acl", "posix", "user") .build()) .getRootDirectories(), diff --git a/src/test/java/build/buildfarm/common/io/UtilsTest.java b/src/test/java/build/buildfarm/common/io/UtilsTest.java index 34896e1eab..1422f27e35 100644 --- a/src/test/java/build/buildfarm/common/io/UtilsTest.java +++ b/src/test/java/build/buildfarm/common/io/UtilsTest.java @@ -47,7 +47,8 @@ public void setUp() throws IOException { @After public void tearDown() throws IOException { - Directories.remove(root); + fileStore = Files.getFileStore(root); + Directories.remove(root, fileStore); } @Test diff --git a/src/test/java/build/buildfarm/common/redis/BUILD b/src/test/java/build/buildfarm/common/redis/BUILD index 7b3978fb53..467f6fd335 100644 --- a/src/test/java/build/buildfarm/common/redis/BUILD +++ b/src/test/java/build/buildfarm/common/redis/BUILD @@ -5,10 +5,12 @@ COMMON_DEPS = [ 
"//src/main/java/build/buildfarm/common/redis", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", "//src/test/java/build/buildfarm:test_runner", - "//third_party/jedis", + "@maven//:com_github_fppt_jedis_mock", + "@maven//:com_google_guava_guava", "@maven//:com_google_truth_truth", "@maven//:io_grpc_grpc_api", "@maven//:org_mockito_mockito_core", + "@maven//:redis_clients_jedis", ] NATIVE_REDIS_TESTS = [ diff --git a/src/test/java/build/buildfarm/common/redis/BalancedRedisQueueMockTest.java b/src/test/java/build/buildfarm/common/redis/BalancedRedisQueueMockTest.java index 2869b2edc9..61453672e6 100644 --- a/src/test/java/build/buildfarm/common/redis/BalancedRedisQueueMockTest.java +++ b/src/test/java/build/buildfarm/common/redis/BalancedRedisQueueMockTest.java @@ -15,23 +15,30 @@ package build.buildfarm.common.redis; import static com.google.common.truth.Truth.assertThat; +import static java.util.concurrent.Executors.newSingleThreadExecutor; +import static java.util.concurrent.TimeUnit.SECONDS; import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; import static org.mockito.Mockito.when; +import build.buildfarm.common.Queue; import build.buildfarm.common.StringVisitor; -import build.buildfarm.common.config.Queue; import com.google.common.collect.ImmutableList; +import java.time.Duration; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; +import java.util.concurrent.ExecutorService; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; import org.mockito.Mock; import org.mockito.MockitoAnnotations; +import redis.clients.jedis.Connection; +import redis.clients.jedis.Jedis; import redis.clients.jedis.JedisCluster; /** @@ -46,28 +53,18 @@ @RunWith(JUnit4.class) public class BalancedRedisQueueMockTest { @Mock private JedisCluster redis; + @Mock private Connection connection; + @Mock private Queue subQueue; + + @SuppressWarnings("unused") // parameters are ignored + private Queue subQueueDecorate(Jedis jedis, String name) { + return subQueue; + } @Before public void setUp() { MockitoAnnotations.initMocks(this); - } - - // Function under test: removeFromDequeue - // Reason for testing: removing returns false because the queue is empty and there is nothing to - // remove - // Failure explanation: the queue was either not empty, or an error occured while removing from an - // empty queue - @Test - public void removeFromDequeueFalseOnEmpty() throws Exception { - // ARRANGE - when(redis.lrem(any(String.class), any(Long.class), any(String.class))).thenReturn(0L); - BalancedRedisQueue queue = new BalancedRedisQueue("test", ImmutableList.of()); - - // ACT - Boolean success = queue.removeFromDequeue(redis, "foo"); - - // ASSERT - assertThat(success).isFalse(); + when(redis.getConnectionFromSlot(any(Integer.class))).thenReturn(connection); } // Function under test: removeFromDequeue @@ -78,8 +75,9 @@ public void removeFromDequeueFalseOnEmpty() throws Exception { @Test public void removeFromDequeueFalseWhenValueIsMissing() throws Exception { // ARRANGE - when(redis.lrem(any(String.class), any(Long.class), any(String.class))).thenReturn(0L); - BalancedRedisQueue queue = new BalancedRedisQueue("test", ImmutableList.of()); + when(subQueue.removeFromDequeue(any(String.class))).thenReturn(false); + BalancedRedisQueue queue = + new 
BalancedRedisQueue("test", ImmutableList.of("test"), this::subQueueDecorate); // ACT Boolean success = queue.removeFromDequeue(redis, "baz"); @@ -95,8 +93,9 @@ public void removeFromDequeueFalseWhenValueIsMissing() throws Exception { @Test public void removeFromDequeueTrueWhenValueExists() throws Exception { // ARRANGE - when(redis.lrem(any(String.class), any(Long.class), any(String.class))).thenReturn(1L); - BalancedRedisQueue queue = new BalancedRedisQueue("test", ImmutableList.of()); + when(subQueue.removeFromDequeue(any(String.class))).thenReturn(true); + BalancedRedisQueue queue = + new BalancedRedisQueue("test", ImmutableList.of("test"), this::subQueueDecorate); // ACT Boolean success = queue.removeFromDequeue(redis, "bar"); @@ -105,41 +104,49 @@ public void removeFromDequeueTrueWhenValueExists() throws Exception { assertThat(success).isTrue(); } - // Function under test: dequeue - // Reason for testing: the element is dequeued via nonblocking + // Function under test: take + // Reason for testing: the element is taken via nonblocking // Failure explanation: the element failed to dequeue @Test - public void dequeueExponentialBackoffElementDequeuedOnNonBlock() throws Exception { + public void takeElementDequeuedOnNonBlock() throws Exception { // MOCK - when(redis.rpoplpush(any(String.class), any(String.class))).thenReturn("foo"); + when(subQueue.poll()).thenReturn("foo"); + ExecutorService service = newSingleThreadExecutor(); // ARRANGE - BalancedRedisQueue queue = new BalancedRedisQueue("test", ImmutableList.of()); + BalancedRedisQueue queue = + new BalancedRedisQueue("test", ImmutableList.of("test"), this::subQueueDecorate); // ACT - String val = queue.dequeue(redis); + String val = queue.take(redis, service); // ASSERT assertThat(val).isEqualTo("foo"); + service.shutdown(); + assertThat(service.awaitTermination(1, SECONDS)).isTrue(); } - // Function under test: dequeue - // Reason for testing: the element is dequeued via nonblocking + // Function under test: take + // Reason for testing: the element is taken via nonblocking // Failure explanation: the element failed to dequeue @Test - public void dequeueExponentialBackoffElementDequeuedOnBlock() throws Exception { + public void dequeueElementDequeuedOnBlock() throws Exception { // MOCK - when(redis.rpoplpush(any(String.class), any(String.class))).thenReturn(null); - when(redis.brpoplpush(any(String.class), any(String.class), any(int.class))).thenReturn("foo"); + when(subQueue.poll()).thenReturn(null); + when(subQueue.take(any(Duration.class))).thenReturn("foo"); // ARRANGE - BalancedRedisQueue queue = new BalancedRedisQueue("test", ImmutableList.of()); + BalancedRedisQueue queue = + new BalancedRedisQueue("test", ImmutableList.of("test"), this::subQueueDecorate); + ExecutorService service = newSingleThreadExecutor(); // ACT - String val = queue.dequeue(redis); + String val = queue.take(redis, service); // ASSERT assertThat(val).isEqualTo("foo"); + service.shutdown(); + assertThat(service.awaitTermination(1, SECONDS)).isTrue(); } // Function under test: getCurrentPopQueue @@ -148,7 +155,8 @@ public void dequeueExponentialBackoffElementDequeuedOnBlock() throws Exception { @Test public void getCurrentPopQueueCanGet() throws Exception { // ARRANGE - BalancedRedisQueue queue = new BalancedRedisQueue("queue_name", ImmutableList.of()); + BalancedRedisQueue queue = + new BalancedRedisQueue("queue_name", ImmutableList.of(), this::subQueueDecorate); // ACT queue.getCurrentPopQueue(); @@ -160,7 +168,8 @@ public void getCurrentPopQueueCanGet() 
throws Exception { @Test public void getCurrentPopQueueIndexCanGet() throws Exception { // ARRANGE - BalancedRedisQueue queue = new BalancedRedisQueue("queue_name", ImmutableList.of()); + BalancedRedisQueue queue = + new BalancedRedisQueue("queue_name", ImmutableList.of(), this::subQueueDecorate); // ACT queue.getCurrentPopQueueIndex(); @@ -172,7 +181,8 @@ public void getCurrentPopQueueIndexCanGet() throws Exception { @Test public void getInternalQueueCanGet() throws Exception { // ARRANGE - BalancedRedisQueue queue = new BalancedRedisQueue("queue_name", ImmutableList.of()); + BalancedRedisQueue queue = + new BalancedRedisQueue("queue_name", ImmutableList.of(), this::subQueueDecorate); // ACT queue.getInternalQueue(0); @@ -184,7 +194,8 @@ public void getInternalQueueCanGet() throws Exception { @Test public void getDequeueNameCanGet() throws Exception { // ARRANGE - BalancedRedisQueue queue = new BalancedRedisQueue("queue_name", ImmutableList.of()); + BalancedRedisQueue queue = + new BalancedRedisQueue("queue_name", ImmutableList.of(), this::subQueueDecorate); // ACT String name = queue.getDequeueName(); @@ -199,7 +210,8 @@ public void getDequeueNameCanGet() throws Exception { @Test public void getNameNameIsStored() throws Exception { // ARRANGE - BalancedRedisQueue queue = new BalancedRedisQueue("queue_name", ImmutableList.of()); + BalancedRedisQueue queue = + new BalancedRedisQueue("queue_name", ImmutableList.of(), this::subQueueDecorate); // ACT String name = queue.getName(); @@ -214,10 +226,11 @@ public void getNameNameIsStored() throws Exception { @Test public void sizeInitialSizeIsZero() throws Exception { // MOCK - when(redis.llen(any(String.class))).thenReturn(0L); + when(subQueue.size()).thenReturn(0L); // ARRANGE - BalancedRedisQueue queue = new BalancedRedisQueue("test", ImmutableList.of()); + BalancedRedisQueue queue = + new BalancedRedisQueue("test", ImmutableList.of("test"), this::subQueueDecorate); // ACT long size = queue.size(redis); @@ -232,28 +245,20 @@ public void sizeInitialSizeIsZero() throws Exception { @Test public void visitCheckVisitOfEachElement() throws Exception { // MOCK - when(redis.lrange(any(String.class), any(Long.class), any(Long.class))) - .thenReturn( - Arrays.asList( - "element 1", - "element 2", - "element 3", - "element 4", - "element 5", - "element 6", - "element 7", - "element 8")); + doAnswer( + invocation -> { + StringVisitor visitor = invocation.getArgument(0); + for (int i = 1; i <= 8; i++) { + visitor.visit("element " + i); + } + return null; + }) + .when(subQueue) + .visit(any(StringVisitor.class)); // ARRANGE - BalancedRedisQueue queue = new BalancedRedisQueue("test", ImmutableList.of()); - queue.push(redis, "element 1"); - queue.push(redis, "element 2"); - queue.push(redis, "element 3"); - queue.push(redis, "element 4"); - queue.push(redis, "element 5"); - queue.push(redis, "element 6"); - queue.push(redis, "element 7"); - queue.push(redis, "element 8"); + BalancedRedisQueue queue = + new BalancedRedisQueue("test", ImmutableList.of("test"), this::subQueueDecorate); // ACT List visited = new ArrayList<>(); @@ -266,15 +271,16 @@ public void visit(String entry) { queue.visit(redis, visitor); // ASSERT - assertThat(visited.size()).isEqualTo(8); - assertThat(visited.contains("element 1")).isTrue(); - assertThat(visited.contains("element 2")).isTrue(); - assertThat(visited.contains("element 3")).isTrue(); - assertThat(visited.contains("element 4")).isTrue(); - assertThat(visited.contains("element 5")).isTrue(); - 
assertThat(visited.contains("element 6")).isTrue(); - assertThat(visited.contains("element 7")).isTrue(); - assertThat(visited.contains("element 8")).isTrue(); + assertThat(visited) + .containsExactly( + "element 1", + "element 2", + "element 3", + "element 4", + "element 5", + "element 6", + "element 7", + "element 8"); } // Function under test: visitDequeue @@ -283,20 +289,20 @@ public void visit(String entry) { @Test public void visitDequeueCheckVisitOfEachElement() throws Exception { // MOCK - when(redis.lrange(any(String.class), any(Long.class), any(Long.class))) - .thenReturn( - Arrays.asList( - "element 1", - "element 2", - "element 3", - "element 4", - "element 5", - "element 6", - "element 7", - "element 8")); + doAnswer( + invocation -> { + StringVisitor visitor = invocation.getArgument(0); + for (int i = 1; i <= 8; i++) { + visitor.visit("element " + i); + } + return null; + }) + .when(subQueue) + .visitDequeue(any(StringVisitor.class)); // ARRANGE - BalancedRedisQueue queue = new BalancedRedisQueue("test", ImmutableList.of()); + BalancedRedisQueue queue = + new BalancedRedisQueue("test", ImmutableList.of("test"), this::subQueueDecorate); // ACT List visited = new ArrayList<>(); @@ -309,53 +315,35 @@ public void visit(String entry) { queue.visitDequeue(redis, visitor); // ASSERT - assertThat(visited.size()).isEqualTo(8); - assertThat(visited.contains("element 1")).isTrue(); - assertThat(visited.contains("element 2")).isTrue(); - assertThat(visited.contains("element 3")).isTrue(); - assertThat(visited.contains("element 4")).isTrue(); - assertThat(visited.contains("element 5")).isTrue(); - assertThat(visited.contains("element 6")).isTrue(); - assertThat(visited.contains("element 7")).isTrue(); - assertThat(visited.contains("element 8")).isTrue(); + assertThat(visited) + .containsExactly( + "element 1", + "element 2", + "element 3", + "element 4", + "element 5", + "element 6", + "element 7", + "element 8"); } // Function under test: isEvenlyDistributed // Reason for testing: an empty queue is always already evenly distributed // Failure explanation: evenly distributed is not working on the empty queue @Test - public void isEvenlyDistributedEmptyIsEvenlyDistributed() throws Exception { + public void emptyIsEvenlyDistributed() throws Exception { // MOCK - when(redis.llen(any(String.class))).thenReturn(0L); - - // ARRANGE - BalancedRedisQueue queue = new BalancedRedisQueue("test", ImmutableList.of()); - - // ACT - Boolean isEvenlyDistributed = queue.isEvenlyDistributed(redis); - - // ASSERT - verify(redis, times(2)).llen(any(String.class)); - assertThat(isEvenlyDistributed).isTrue(); - } - - // Function under test: isEvenlyDistributed for priority - // Reason for testing: an empty queue is always already evenly distributed - // Failure explanation: evenly distributed is not working on the empty queue - @Test - public void isEvenlyDistributedEmptyIsEvenlyDistributedPriority() throws Exception { - // MOCK - when(redis.zcard(any(String.class))).thenReturn(0L); + when(subQueue.size()).thenReturn(0L); // ARRANGE BalancedRedisQueue queue = - new BalancedRedisQueue("test", ImmutableList.of(), Queue.QUEUE_TYPE.priority); + new BalancedRedisQueue("test", ImmutableList.of("test"), this::subQueueDecorate); // ACT Boolean isEvenlyDistributed = queue.isEvenlyDistributed(redis); // ASSERT - verify(redis, times(2)).zcard(any(String.class)); + verify(subQueue, times(1)).size(); assertThat(isEvenlyDistributed).isTrue(); } @@ -363,74 +351,36 @@ public void 
isEvenlyDistributedEmptyIsEvenlyDistributedPriority() throws Excepti // Reason for testing: infinite queues allow queuing // Failure explanation: the queue is not accepting queuing when it should @Test - public void canQueueIfiniteQueueAllowsQueuing() throws Exception { - // MOCK - when(redis.llen(any(String.class))).thenReturn(999L); - - // ARRANGE - BalancedRedisQueue queue = new BalancedRedisQueue("test", ImmutableList.of(), -1); - - // ACT - boolean canQueue = queue.canQueue(redis); - - // ASSERT - assertThat(canQueue).isTrue(); - } - - // Function under test: canQueue - // Reason for testing: infinite queues allow queuing for priority - // Failure explanation: the queue is not accepting queuing when it should - @Test - public void canQueueIfinitePriorityQueueAllowsQueuing() throws Exception { - // MOCK - when(redis.zcard(any(String.class))).thenReturn(999L); - + public void canQueueInfiniteQueueAllowsQueuing() throws Exception { // ARRANGE BalancedRedisQueue queue = - new BalancedRedisQueue("test", ImmutableList.of(), -1, Queue.QUEUE_TYPE.priority); + new BalancedRedisQueue("test", ImmutableList.of("test"), this::subQueueDecorate); // ACT boolean canQueue = queue.canQueue(redis); // ASSERT + verifyNoInteractions(subQueue); assertThat(canQueue).isTrue(); } - // Function under test: canQueue for regular - // Reason for testing: Full queues do not allow queuing - // Failure explanation: the queue is still allowing queuing despite being full - @Test - public void canQueueFullQueueNotAllowsQueuing() throws Exception { - // MOCK - when(redis.llen(any(String.class))).thenReturn(123L); - - // ARRANGE - BalancedRedisQueue queue = new BalancedRedisQueue("test", ImmutableList.of(), 123); - - // ACT - boolean canQueue = queue.canQueue(redis); - - // ASSERT - assertThat(canQueue).isFalse(); - } - // Function under test: canQueue for priority // Reason for testing: Full queues do not allow queuing - // Failure explanation: the queue is still allowing queuing despite being full + // Failure explanation: the queue still allows queueing despite being full @Test - public void canQueueFullPriorityQueueNotAllowsQueuing() throws Exception { + public void canQueueFullQueueNotAllowsQueueing() throws Exception { // MOCK - when(redis.zcard(any(String.class))).thenReturn(123L); + when(subQueue.size()).thenReturn(123L); // ARRANGE BalancedRedisQueue queue = - new BalancedRedisQueue("test", ImmutableList.of(), 123, Queue.QUEUE_TYPE.priority); + new BalancedRedisQueue("test", ImmutableList.of("test"), 123, this::subQueueDecorate); // ACT boolean canQueue = queue.canQueue(redis); // ASSERT - verify(redis).zcard(any(String.class)); + verify(subQueue, times(1)).size(); assertThat(canQueue).isFalse(); } } diff --git a/src/test/java/build/buildfarm/common/redis/BalancedRedisQueueTest.java b/src/test/java/build/buildfarm/common/redis/BalancedRedisQueueTest.java index 41fbece7c8..1aacd5605e 100644 --- a/src/test/java/build/buildfarm/common/redis/BalancedRedisQueueTest.java +++ b/src/test/java/build/buildfarm/common/redis/BalancedRedisQueueTest.java @@ -15,46 +15,48 @@ package build.buildfarm.common.redis; import static com.google.common.truth.Truth.assertThat; +import static java.util.concurrent.Executors.newSingleThreadExecutor; +import static java.util.concurrent.TimeUnit.SECONDS; import build.buildfarm.common.StringVisitor; import build.buildfarm.common.config.BuildfarmConfigs; -import build.buildfarm.common.config.Queue; import build.buildfarm.instance.shard.JedisClusterFactory; import
com.google.common.collect.ImmutableList; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.concurrent.ExecutorService; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; -import redis.clients.jedis.JedisCluster; +import redis.clients.jedis.UnifiedJedis; /** * @class BalancedRedisQueueTest - * @brief tests A balanced redis queue. - * @details A balanced redis queue is an implementation of a queue data structure which internally - * uses multiple redis nodes to distribute the data across the cluster. Its important to know + * @brief Tests a balanced cluster queue. + * @details A balanced cluster queue is an implementation of a queue data structure which internally + * uses multiple cluster nodes to distribute the data across the cluster. It's important to know that the lifetime of the queue persists before and after the queue data structure is created - (since it exists in redis). Therefore, two redis queues with the same name, would in fact be - the same underlying redis queues. + (since it exists in jedis). Therefore, two cluster queues with the same name would in fact + be the same underlying cluster queues. */ @RunWith(JUnit4.class) public class BalancedRedisQueueTest { private BuildfarmConfigs configs = BuildfarmConfigs.getInstance(); - private JedisCluster redis; + private UnifiedJedis jedis; @Before public void setUp() throws Exception { - configs.getBackplane().setRedisUri("redis://localhost:6379"); - redis = JedisClusterFactory.createTest(); + configs.getBackplane().setRedisUri("cluster://localhost:6379"); + jedis = JedisClusterFactory.createTest(); } @After public void tearDown() { - redis.close(); + jedis.close(); } // Function under test: BalancedRedisQueue @@ -63,96 +65,96 @@ public void tearDown() { @Test public void balancedRedisQueueCreateHashesConstructsWithoutError() throws Exception { // ACT - new BalancedRedisQueue("test", ImmutableList.of()); + new BalancedRedisQueue("test", ImmutableList.of(), RedisQueue::decorate); } - // Function under test: push + // Function under test: offer // Reason for testing: the queue can have a value pushed onto it - // Failure explanation: the queue is throwing an exception upon push + // Failure explanation: the queue is throwing an exception upon offer @Test public void pushPushWithoutError() throws Exception { // ARRANGE - List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(redis); - BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags); + List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(jedis); + BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags, RedisQueue::decorate); // ACT - queue.push(redis, "foo"); + queue.offer(jedis, "foo"); } - // Function under test: push + // Function under test: offer // Reason for testing: the queue can have the different values pushed onto it // Failure explanation: the queue is throwing an exception upon pushing different values @Test public void pushPushDifferentWithoutError() throws Exception { // ARRANGE - List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(redis); - BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags); + List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(jedis); + BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags, RedisQueue::decorate); // ACT - queue.push(redis, "foo"); - queue.push(redis, "bar"); + queue.offer(jedis,
"foo"); + queue.offer(jedis, "bar"); } - // Function under test: push + // Function under test: offer // Reason for testing: the queue can have the same values pushed onto it // Failure explanation: the queue is throwing an exception upon pushing the same values @Test public void pushPushSameWithoutError() throws Exception { // ARRANGE - List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(redis); - BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags); + List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(jedis); + BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags, RedisQueue::decorate); // ACT - queue.push(redis, "foo"); - queue.push(redis, "foo"); + queue.offer(jedis, "foo"); + queue.offer(jedis, "foo"); } - // Function under test: push + // Function under test: offer // Reason for testing: the queue can have many values pushed into it // Failure explanation: the queue is throwing an exception upon pushing many values @Test public void pushPushMany() throws Exception { // ARRANGE - List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(redis); - BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags); + List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(jedis); + BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags, RedisQueue::decorate); // ACT for (int i = 0; i < 1000; ++i) { - queue.push(redis, "foo" + i); + queue.offer(jedis, "foo" + i); } } - // Function under test: push + // Function under test: offer // Reason for testing: the queue size increases as elements are pushed // Failure explanation: the queue size is not accurately reflecting the pushes @Test public void pushPushIncreasesSize() throws Exception { // ARRANGE - List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(redis); - BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags); + List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(jedis); + BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags, RedisQueue::decorate); // ACT / ASSERT - assertThat(queue.size(redis)).isEqualTo(0); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(1); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(2); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(3); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(4); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(5); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(6); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(7); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(8); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(9); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(10); + assertThat(queue.size(jedis)).isEqualTo(0); + queue.offer(jedis, "foo"); + assertThat(queue.size(jedis)).isEqualTo(1); + queue.offer(jedis, "foo"); + assertThat(queue.size(jedis)).isEqualTo(2); + queue.offer(jedis, "foo"); + assertThat(queue.size(jedis)).isEqualTo(3); + queue.offer(jedis, "foo"); + assertThat(queue.size(jedis)).isEqualTo(4); + queue.offer(jedis, "foo"); + assertThat(queue.size(jedis)).isEqualTo(5); + queue.offer(jedis, "foo"); + assertThat(queue.size(jedis)).isEqualTo(6); + queue.offer(jedis, "foo"); + assertThat(queue.size(jedis)).isEqualTo(7); + queue.offer(jedis, "foo"); + assertThat(queue.size(jedis)).isEqualTo(8); + queue.offer(jedis, "foo"); + 
assertThat(queue.size(jedis)).isEqualTo(9); + queue.offer(jedis, "foo"); + assertThat(queue.size(jedis)).isEqualTo(10); } // Function under test: removeFromDequeue @@ -163,11 +165,11 @@ public void pushPushIncreasesSize() throws Exception { @Test public void removeFromDequeueFalseOnEmpty() throws Exception { // ARRANGE - List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(redis); - BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags); + List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(jedis); + BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags, RedisQueue::decorate); // ACT - Boolean success = queue.removeFromDequeue(redis, "foo"); + Boolean success = queue.removeFromDequeue(jedis, "foo"); // ASSERT assertThat(success).isFalse(); @@ -181,17 +183,20 @@ public void removeFromDequeueFalseOnEmpty() throws Exception { @Test public void removeFromDequeueFalseWhenValueIsMissing() throws Exception { // ARRANGE - List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(redis); - BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags); + List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(jedis); + BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags, RedisQueue::decorate); + ExecutorService service = newSingleThreadExecutor(); // ACT - queue.push(redis, "foo"); - queue.push(redis, "bar"); - queue.dequeue(redis); - queue.dequeue(redis); - Boolean success = queue.removeFromDequeue(redis, "baz"); + queue.offer(jedis, "foo"); + queue.offer(jedis, "bar"); + queue.take(jedis, service); + queue.take(jedis, service); + service.shutdown(); + Boolean success = queue.removeFromDequeue(jedis, "baz"); // ASSERT + assertThat(service.awaitTermination(0, SECONDS)).isTrue(); assertThat(success).isFalse(); } @@ -202,19 +207,22 @@ public void removeFromDequeueFalseWhenValueIsMissing() throws Exception { @Test public void removeFromDequeueTrueWhenValueExists() throws Exception { // ARRANGE - List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(redis); - BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags); + List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(jedis); + BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags, RedisQueue::decorate); + ExecutorService service = newSingleThreadExecutor(); // ACT - queue.push(redis, "foo"); - queue.push(redis, "bar"); - queue.push(redis, "baz"); - queue.dequeue(redis); - queue.dequeue(redis); - queue.dequeue(redis); - Boolean success = queue.removeFromDequeue(redis, "bar"); + queue.offer(jedis, "foo"); + queue.offer(jedis, "bar"); + queue.offer(jedis, "baz"); + queue.take(jedis, service); + queue.take(jedis, service); + queue.take(jedis, service); + service.shutdown(); + Boolean success = queue.removeFromDequeue(jedis, "bar"); // ASSERT + assertThat(service.awaitTermination(0, SECONDS)).isTrue(); assertThat(success).isTrue(); } @@ -224,8 +232,8 @@ public void removeFromDequeueTrueWhenValueExists() throws Exception { @Test public void getNameNameIsStored() throws Exception { // ARRANGE - List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(redis); - BalancedRedisQueue queue = new BalancedRedisQueue("queue_name", hashtags); + List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(jedis); + BalancedRedisQueue queue = new BalancedRedisQueue("queue_name", hashtags, RedisQueue::decorate); // ACT String name = queue.getName(); @@ -240,8 +248,9 @@ public void getNameNameIsStored() throws Exception { @Test public void 
getNameNameHasHashtagRemovedFront() throws Exception { // ARRANGE - List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(redis); - BalancedRedisQueue queue = new BalancedRedisQueue("{hash}queue_name", hashtags); + List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(jedis); + BalancedRedisQueue queue = + new BalancedRedisQueue("{hash}queue_name", hashtags, RedisQueue::decorate); // ACT String name = queue.getName(); @@ -255,9 +264,9 @@ public void getNameNameHasHashtagRemovedFront() throws Exception { @Test public void getNameNameHasHashtagRemovedFrontPriority() throws Exception { // ARRANGE - List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(redis); + List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(jedis); BalancedRedisQueue queue = - new BalancedRedisQueue("{hash}queue_name", hashtags, Queue.QUEUE_TYPE.priority); + new BalancedRedisQueue("{hash}queue_name", hashtags, RedisPriorityQueue::decorate); // ACT String name = queue.getName(); @@ -271,9 +280,10 @@ public void getNameNameHasHashtagRemovedFrontPriority() throws Exception { @Test public void getNameNameHasHashtagColonRemovedFront() throws Exception { // ARRANGE - List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(redis); + List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(jedis); // similar to what has been seen in configuration files - BalancedRedisQueue queue = new BalancedRedisQueue("{Execution}:QueuedOperations", hashtags); + BalancedRedisQueue queue = + new BalancedRedisQueue("{Execution}:QueuedOperations", hashtags, RedisQueue::decorate); // ACT String name = queue.getName(); @@ -287,10 +297,11 @@ public void getNameNameHasHashtagColonRemovedFront() throws Exception { @Test public void getNameNameHasHashtagColonRemovedFrontPriority() throws Exception { // ARRANGE - List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(redis); + List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(jedis); // similar to what has been seen in configuration files BalancedRedisQueue queue = - new BalancedRedisQueue("{Execution}:QueuedOperations", hashtags, Queue.QUEUE_TYPE.priority); + new BalancedRedisQueue( + "{Execution}:QueuedOperations", hashtags, RedisPriorityQueue::decorate); // ACT String name = queue.getName(); @@ -304,8 +315,9 @@ public void getNameNameHasHashtagColonRemovedFrontPriority() throws Exception { @Test public void getNameNameHasHashtagRemovedBack() throws Exception { // ARRANGE - List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(redis); - BalancedRedisQueue queue = new BalancedRedisQueue("queue_name{hash}", hashtags); + List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(jedis); + BalancedRedisQueue queue = + new BalancedRedisQueue("queue_name{hash}", hashtags, RedisQueue::decorate); // ACT String name = queue.getName(); @@ -319,8 +331,9 @@ public void getNameNameHasHashtagRemovedBack() throws Exception { @Test public void getNameNameHasHashtagRemovedMiddle() throws Exception { // ARRANGE - List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(redis); - BalancedRedisQueue queue = new BalancedRedisQueue("queue_{hash}name", hashtags); + List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(jedis); + BalancedRedisQueue queue = + new BalancedRedisQueue("queue_{hash}name", hashtags, RedisQueue::decorate); // ACT String name = queue.getName(); @@ -334,8 +347,9 @@ public void getNameNameHasHashtagRemovedMiddle() throws Exception { @Test public void getNameNameHasHashtagRemovedFrontBack() throws Exception { // ARRANGE - List hashtags = 
RedisNodeHashes.getEvenlyDistributedHashes(redis); - BalancedRedisQueue queue = new BalancedRedisQueue("{hash}queue_name{hash}", hashtags); + List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(jedis); + BalancedRedisQueue queue = + new BalancedRedisQueue("{hash}queue_name{hash}", hashtags, RedisQueue::decorate); // ACT String name = queue.getName(); @@ -344,77 +358,84 @@ public void getNameNameHasHashtagRemovedFrontBack() throws Exception { } // Function under test: size - // Reason for testing: size adjusts with push and dequeue + // Reason for testing: size adjusts with offer and take // Failure explanation: size is incorrectly reporting the expected queue size @Test public void sizeAdjustPushPop() throws Exception { // ARRANGE - List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(redis); - BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags); + List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(jedis); + BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags, RedisQueue::decorate); + ExecutorService service = newSingleThreadExecutor(); // ACT / ASSERT - assertThat(queue.size(redis)).isEqualTo(0); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(1); - queue.push(redis, "bar"); - assertThat(queue.size(redis)).isEqualTo(2); - queue.push(redis, "baz"); - assertThat(queue.size(redis)).isEqualTo(3); - queue.push(redis, "baz"); - assertThat(queue.size(redis)).isEqualTo(4); - queue.push(redis, "baz"); - assertThat(queue.size(redis)).isEqualTo(5); - queue.push(redis, "baz"); - assertThat(queue.size(redis)).isEqualTo(6); - queue.dequeue(redis); - assertThat(queue.size(redis)).isEqualTo(5); - queue.dequeue(redis); - assertThat(queue.size(redis)).isEqualTo(4); - queue.dequeue(redis); - assertThat(queue.size(redis)).isEqualTo(3); - queue.dequeue(redis); - assertThat(queue.size(redis)).isEqualTo(2); - queue.dequeue(redis); - assertThat(queue.size(redis)).isEqualTo(1); - queue.dequeue(redis); - assertThat(queue.size(redis)).isEqualTo(0); + assertThat(queue.size(jedis)).isEqualTo(0); + queue.offer(jedis, "foo"); + assertThat(queue.size(jedis)).isEqualTo(1); + queue.offer(jedis, "bar"); + assertThat(queue.size(jedis)).isEqualTo(2); + queue.offer(jedis, "baz"); + assertThat(queue.size(jedis)).isEqualTo(3); + queue.offer(jedis, "baz"); + assertThat(queue.size(jedis)).isEqualTo(4); + queue.offer(jedis, "baz"); + assertThat(queue.size(jedis)).isEqualTo(5); + queue.offer(jedis, "baz"); + assertThat(queue.size(jedis)).isEqualTo(6); + queue.take(jedis, service); + assertThat(queue.size(jedis)).isEqualTo(5); + queue.take(jedis, service); + assertThat(queue.size(jedis)).isEqualTo(4); + queue.take(jedis, service); + assertThat(queue.size(jedis)).isEqualTo(3); + queue.take(jedis, service); + assertThat(queue.size(jedis)).isEqualTo(2); + queue.take(jedis, service); + assertThat(queue.size(jedis)).isEqualTo(1); + queue.take(jedis, service); + assertThat(queue.size(jedis)).isEqualTo(0); + service.shutdown(); + assertThat(service.awaitTermination(0, SECONDS)).isTrue(); } // Function under test: size - // Reason for testing: size adjusts with push and dequeue + // Reason for testing: size adjusts with offer and take // Failure explanation: size is incorrectly reporting the expected queue size @Test public void sizeAdjustPushPopPriority() throws Exception { // ARRANGE - List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(redis); - BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags, Queue.QUEUE_TYPE.priority); + List hashtags = 
RedisNodeHashes.getEvenlyDistributedHashes(jedis); + BalancedRedisQueue queue = + new BalancedRedisQueue("test", hashtags, RedisPriorityQueue::decorate); + ExecutorService service = newSingleThreadExecutor(); // ACT / ASSERT - assertThat(queue.size(redis)).isEqualTo(0); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(1); - queue.push(redis, "bar"); - assertThat(queue.size(redis)).isEqualTo(2); - queue.push(redis, "baz"); - assertThat(queue.size(redis)).isEqualTo(3); - queue.push(redis, "baz"); - assertThat(queue.size(redis)).isEqualTo(4); - queue.push(redis, "baz"); - assertThat(queue.size(redis)).isEqualTo(5); - queue.push(redis, "baz"); - assertThat(queue.size(redis)).isEqualTo(6); - queue.dequeue(redis); - assertThat(queue.size(redis)).isEqualTo(5); - queue.dequeue(redis); - assertThat(queue.size(redis)).isEqualTo(4); - queue.dequeue(redis); - assertThat(queue.size(redis)).isEqualTo(3); - queue.dequeue(redis); - assertThat(queue.size(redis)).isEqualTo(2); - queue.dequeue(redis); - assertThat(queue.size(redis)).isEqualTo(1); - queue.dequeue(redis); - assertThat(queue.size(redis)).isEqualTo(0); + assertThat(queue.size(jedis)).isEqualTo(0); + queue.offer(jedis, "foo1"); + assertThat(queue.size(jedis)).isEqualTo(1); + queue.offer(jedis, "foo2"); + assertThat(queue.size(jedis)).isEqualTo(2); + queue.offer(jedis, "foo3"); + assertThat(queue.size(jedis)).isEqualTo(3); + queue.offer(jedis, "foo4"); + assertThat(queue.size(jedis)).isEqualTo(4); + queue.offer(jedis, "foo5"); + assertThat(queue.size(jedis)).isEqualTo(5); + queue.offer(jedis, "foo6"); + assertThat(queue.size(jedis)).isEqualTo(6); + queue.take(jedis, service); + assertThat(queue.size(jedis)).isEqualTo(5); + queue.take(jedis, service); + assertThat(queue.size(jedis)).isEqualTo(4); + queue.take(jedis, service); + assertThat(queue.size(jedis)).isEqualTo(3); + queue.take(jedis, service); + assertThat(queue.size(jedis)).isEqualTo(2); + queue.take(jedis, service); + assertThat(queue.size(jedis)).isEqualTo(1); + queue.take(jedis, service); + assertThat(queue.size(jedis)).isEqualTo(0); + service.shutdown(); + assertThat(service.awaitTermination(0, SECONDS)).isTrue(); } // Function under test: visit @@ -423,16 +444,16 @@ public void sizeAdjustPushPopPriority() throws Exception { @Test public void visitCheckVisitOfEachElement() throws Exception { // ARRANGE - List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(redis); - BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags); - queue.push(redis, "element 1"); - queue.push(redis, "element 2"); - queue.push(redis, "element 3"); - queue.push(redis, "element 4"); - queue.push(redis, "element 5"); - queue.push(redis, "element 6"); - queue.push(redis, "element 7"); - queue.push(redis, "element 8"); + List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(jedis); + BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags, RedisQueue::decorate); + queue.offer(jedis, "element 1"); + queue.offer(jedis, "element 2"); + queue.offer(jedis, "element 3"); + queue.offer(jedis, "element 4"); + queue.offer(jedis, "element 5"); + queue.offer(jedis, "element 6"); + queue.offer(jedis, "element 7"); + queue.offer(jedis, "element 8"); // ACT List visited = new ArrayList<>(); @@ -442,7 +463,7 @@ public void visit(String entry) { visited.add(entry); } }; - queue.visit(redis, visitor); + queue.visit(jedis, visitor); // ASSERT assertThat(visited.size()).isEqualTo(8); @@ -462,16 +483,17 @@ public void visit(String entry) { @Test public void 
visitCheckVisitOfEachElementPriority() throws Exception { // ARRANGE - List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(redis); - BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags, Queue.QUEUE_TYPE.priority); - queue.push(redis, "element 1"); - queue.push(redis, "element 2"); - queue.push(redis, "element 3"); - queue.push(redis, "element 4"); - queue.push(redis, "element 5"); - queue.push(redis, "element 6"); - queue.push(redis, "element 7"); - queue.push(redis, "element 8"); + List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(jedis); + BalancedRedisQueue queue = + new BalancedRedisQueue("test", hashtags, RedisPriorityQueue::decorate); + queue.offer(jedis, "element 1"); + queue.offer(jedis, "element 2"); + queue.offer(jedis, "element 3"); + queue.offer(jedis, "element 4"); + queue.offer(jedis, "element 5"); + queue.offer(jedis, "element 6"); + queue.offer(jedis, "element 7"); + queue.offer(jedis, "element 8"); // ACT List visited = new ArrayList<>(); @@ -481,7 +503,7 @@ public void visit(String entry) { visited.add(entry); } }; - queue.visit(redis, visitor); + queue.visit(jedis, visitor); // ASSERT assertThat(visited.size()).isEqualTo(8); @@ -501,11 +523,11 @@ public void visit(String entry) { @Test public void isEvenlyDistributedEmptyIsEvenlyDistributed() throws Exception { // ARRANGE - List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(redis); - BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags); + List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(jedis); + BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags, RedisQueue::decorate); // ACT - Boolean isEvenlyDistributed = queue.isEvenlyDistributed(redis); + Boolean isEvenlyDistributed = queue.isEvenlyDistributed(jedis); // ASSERT assertThat(isEvenlyDistributed).isTrue(); @@ -517,11 +539,12 @@ public void isEvenlyDistributedEmptyIsEvenlyDistributed() throws Exception { @Test public void isEvenlyDistributedEmptyIsEvenlyDistributedPriority() throws Exception { // ARRANGE - List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(redis); - BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags, Queue.QUEUE_TYPE.priority); + List hashtags = RedisNodeHashes.getEvenlyDistributedHashes(jedis); + BalancedRedisQueue queue = + new BalancedRedisQueue("test", hashtags, RedisPriorityQueue::decorate); // ACT - Boolean isEvenlyDistributed = queue.isEvenlyDistributed(redis); + Boolean isEvenlyDistributed = queue.isEvenlyDistributed(jedis); // ASSERT assertThat(isEvenlyDistributed).isTrue(); @@ -535,13 +558,13 @@ public void isEvenlyDistributedEmptyIsEvenlyDistributedPriority() throws Excepti public void isEvenlyDistributedFourNodesFourHundredPushesIsEven() throws Exception { // ARRANGE List hashtags = Arrays.asList("node1", "node2", "node3", "node4"); - BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags); + BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags, RedisQueue::decorate); // ACT for (int i = 0; i < 400; ++i) { - queue.push(redis, "foo"); + queue.offer(jedis, "foo"); } - Boolean isEvenlyDistributed = queue.isEvenlyDistributed(redis); + Boolean isEvenlyDistributed = queue.isEvenlyDistributed(jedis); // ASSERT assertThat(isEvenlyDistributed).isTrue(); @@ -555,13 +578,14 @@ public void isEvenlyDistributedFourNodesFourHundredPushesIsEven() throws Excepti public void isEvenlyDistributedFourNodesFourHundredPushesIsEvenPriority() throws Exception { // ARRANGE List hashtags = Arrays.asList("node1", "node2", "node3", "node4"); - 
BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags, Queue.QUEUE_TYPE.priority); + BalancedRedisQueue queue = + new BalancedRedisQueue("test", hashtags, RedisPriorityQueue::decorate); // ACT for (int i = 0; i < 400; ++i) { - queue.push(redis, "foo"); + queue.offer(jedis, "foo"); } - Boolean isEvenlyDistributed = queue.isEvenlyDistributed(redis); + Boolean isEvenlyDistributed = queue.isEvenlyDistributed(jedis); // ASSERT assertThat(isEvenlyDistributed).isTrue(); @@ -575,13 +599,13 @@ public void isEvenlyDistributedFourNodesFourHundredPushesIsEvenPriority() throws public void isEvenlyDistributedFourNodesFourHundredOnePushesIsNotEven() throws Exception { // ARRANGE List hashtags = Arrays.asList("node1", "node2", "node3", "node4"); - BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags); + BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags, RedisQueue::decorate); // ACT for (int i = 0; i < 401; ++i) { - queue.push(redis, "foo"); + queue.offer(jedis, "foo" + i); } - Boolean isEvenlyDistributed = queue.isEvenlyDistributed(redis); + Boolean isEvenlyDistributed = queue.isEvenlyDistributed(jedis); // ASSERT assertThat(isEvenlyDistributed).isFalse(); @@ -595,13 +619,14 @@ public void isEvenlyDistributedFourNodesFourHundredOnePushesIsNotEven() throws E public void isEvenlyDistributedFourNodesFourHundredOnePushesIsNotEvenPriority() throws Exception { // ARRANGE List hashtags = Arrays.asList("node1", "node2", "node3", "node4"); - BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags, Queue.QUEUE_TYPE.priority); + BalancedRedisQueue queue = + new BalancedRedisQueue("test", hashtags, RedisPriorityQueue::decorate); // ACT for (int i = 0; i < 401; ++i) { - queue.push(redis, "foo"); + queue.offer(jedis, "foo" + i); } - Boolean isEvenlyDistributed = queue.isEvenlyDistributed(redis); + Boolean isEvenlyDistributed = queue.isEvenlyDistributed(jedis); // ASSERT assertThat(isEvenlyDistributed).isFalse(); @@ -615,25 +640,28 @@ public void isEvenlyDistributedFourNodesFourHundredOnePushesIsNotEvenPriority() public void isEvenlyDistributedSingleNodeAlwaysEvenlyDistributes() throws Exception { // ARRANGE List hashtags = Collections.singletonList("single_node"); - BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags); + BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags, RedisQueue::decorate); + ExecutorService service = newSingleThreadExecutor(); // ACT / ASSERT - queue.push(redis, "foo"); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.push(redis, "foo"); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.push(redis, "foo"); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.push(redis, "foo"); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.dequeue(redis); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.dequeue(redis); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.dequeue(redis); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.dequeue(redis); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); + queue.offer(jedis, "foo"); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.offer(jedis, "foo"); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.offer(jedis, "foo"); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.offer(jedis, "foo"); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.take(jedis, service); + 
assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.take(jedis, service); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.take(jedis, service); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.take(jedis, service); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + service.shutdown(); + assertThat(service.awaitTermination(0, SECONDS)).isTrue(); } // Function under test: isEvenlyDistributed @@ -644,25 +672,29 @@ public void isEvenlyDistributedSingleNodeAlwaysEvenlyDistributes() throws Except public void isEvenlyDistributedSingleNodeAlwaysEvenlyDistributesPriority() throws Exception { // ARRANGE List hashtags = Collections.singletonList("single_node"); - BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags, Queue.QUEUE_TYPE.priority); + BalancedRedisQueue queue = + new BalancedRedisQueue("test", hashtags, RedisPriorityQueue::decorate); + ExecutorService service = newSingleThreadExecutor(); // ACT / ASSERT - queue.push(redis, "foo"); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.push(redis, "foo"); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.push(redis, "foo"); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.push(redis, "foo"); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.dequeue(redis); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.dequeue(redis); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.dequeue(redis); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.dequeue(redis); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); + queue.offer(jedis, "foo"); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.offer(jedis, "bar"); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.offer(jedis, "baz"); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.offer(jedis, "quux"); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.take(jedis, service); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.take(jedis, service); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.take(jedis, service); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.take(jedis, service); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + service.shutdown(); + assertThat(service.awaitTermination(0, SECONDS)).isTrue(); } // Function under test: isEvenlyDistributed @@ -672,38 +704,41 @@ public void isEvenlyDistributedSingleNodeAlwaysEvenlyDistributesPriority() throw public void isEvenlyDistributedTwoNodeExample() throws Exception { // ARRANGE List hashtags = Arrays.asList("node_1", "node_2"); - BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags); + BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags, RedisQueue::decorate); + ExecutorService service = newSingleThreadExecutor(); // ACT / ASSERT - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.push(redis, "foo"); - assertThat(queue.isEvenlyDistributed(redis)).isFalse(); - queue.push(redis, "foo"); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.push(redis, "foo"); - assertThat(queue.isEvenlyDistributed(redis)).isFalse(); - queue.push(redis, "foo"); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.push(redis, "foo"); - assertThat(queue.isEvenlyDistributed(redis)).isFalse(); - queue.push(redis, "foo"); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.push(redis, "foo"); - 
assertThat(queue.isEvenlyDistributed(redis)).isFalse(); - queue.dequeue(redis); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.dequeue(redis); - assertThat(queue.isEvenlyDistributed(redis)).isFalse(); - queue.dequeue(redis); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.dequeue(redis); - assertThat(queue.isEvenlyDistributed(redis)).isFalse(); - queue.dequeue(redis); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.dequeue(redis); - assertThat(queue.isEvenlyDistributed(redis)).isFalse(); - queue.dequeue(redis); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.offer(jedis, "foo1"); + assertThat(queue.isEvenlyDistributed(jedis)).isFalse(); + queue.offer(jedis, "foo2"); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.offer(jedis, "foo3"); + assertThat(queue.isEvenlyDistributed(jedis)).isFalse(); + queue.offer(jedis, "foo4"); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.offer(jedis, "foo5"); + assertThat(queue.isEvenlyDistributed(jedis)).isFalse(); + queue.offer(jedis, "foo6"); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.offer(jedis, "foo7"); + assertThat(queue.isEvenlyDistributed(jedis)).isFalse(); + queue.take(jedis, service); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.take(jedis, service); + assertThat(queue.isEvenlyDistributed(jedis)).isFalse(); + queue.take(jedis, service); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.take(jedis, service); + assertThat(queue.isEvenlyDistributed(jedis)).isFalse(); + queue.take(jedis, service); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.take(jedis, service); + assertThat(queue.isEvenlyDistributed(jedis)).isFalse(); + queue.take(jedis, service); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + service.shutdown(); + assertThat(service.awaitTermination(0, SECONDS)).isTrue(); } // Function under test: isEvenlyDistributed @@ -713,37 +748,41 @@ public void isEvenlyDistributedTwoNodeExample() throws Exception { public void isEvenlyDistributedTwoNodeExamplePriority() throws Exception { // ARRANGE List hashtags = Arrays.asList("node_1", "node_2"); - BalancedRedisQueue queue = new BalancedRedisQueue("test", hashtags, Queue.QUEUE_TYPE.priority); + BalancedRedisQueue queue = + new BalancedRedisQueue("test", hashtags, RedisPriorityQueue::decorate); + ExecutorService service = newSingleThreadExecutor(); // ACT / ASSERT - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.push(redis, "foo"); - assertThat(queue.isEvenlyDistributed(redis)).isFalse(); - queue.push(redis, "foo1"); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.push(redis, "foo2"); - assertThat(queue.isEvenlyDistributed(redis)).isFalse(); - queue.push(redis, "foo3"); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.push(redis, "foo4"); - assertThat(queue.isEvenlyDistributed(redis)).isFalse(); - queue.push(redis, "foo5"); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.push(redis, "foo6"); - assertThat(queue.isEvenlyDistributed(redis)).isFalse(); - queue.dequeue(redis); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.dequeue(redis); - assertThat(queue.isEvenlyDistributed(redis)).isFalse(); - queue.dequeue(redis); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.dequeue(redis); - assertThat(queue.isEvenlyDistributed(redis)).isFalse(); - queue.dequeue(redis); - 
assertThat(queue.isEvenlyDistributed(redis)).isTrue(); - queue.dequeue(redis); - assertThat(queue.isEvenlyDistributed(redis)).isFalse(); - queue.dequeue(redis); - assertThat(queue.isEvenlyDistributed(redis)).isTrue(); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.offer(jedis, "foo"); + assertThat(queue.isEvenlyDistributed(jedis)).isFalse(); + queue.offer(jedis, "foo1"); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.offer(jedis, "foo2"); + assertThat(queue.isEvenlyDistributed(jedis)).isFalse(); + queue.offer(jedis, "foo3"); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.offer(jedis, "foo4"); + assertThat(queue.isEvenlyDistributed(jedis)).isFalse(); + queue.offer(jedis, "foo5"); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.offer(jedis, "foo6"); + assertThat(queue.isEvenlyDistributed(jedis)).isFalse(); + queue.take(jedis, service); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.take(jedis, service); + assertThat(queue.isEvenlyDistributed(jedis)).isFalse(); + queue.take(jedis, service); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.take(jedis, service); + assertThat(queue.isEvenlyDistributed(jedis)).isFalse(); + queue.take(jedis, service); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + queue.take(jedis, service); + assertThat(queue.isEvenlyDistributed(jedis)).isFalse(); + queue.take(jedis, service); + assertThat(queue.isEvenlyDistributed(jedis)).isTrue(); + service.shutdown(); + assertThat(service.awaitTermination(0, SECONDS)).isTrue(); } } diff --git a/src/test/java/build/buildfarm/common/redis/RedisClientTest.java b/src/test/java/build/buildfarm/common/redis/RedisClientTest.java index dd124227c9..16e58c0627 100644 --- a/src/test/java/build/buildfarm/common/redis/RedisClientTest.java +++ b/src/test/java/build/buildfarm/common/redis/RedisClientTest.java @@ -26,6 +26,7 @@ import org.junit.runner.RunWith; import org.junit.runners.JUnit4; import redis.clients.jedis.JedisCluster; +import redis.clients.jedis.exceptions.JedisClusterOperationException; import redis.clients.jedis.exceptions.JedisConnectionException; @RunWith(JUnit4.class) @@ -75,4 +76,22 @@ public void runExceptionSocketTimeoutExceptionIsDeadlineExceeded() } assertThat(status.getCode()).isEqualTo(Code.DEADLINE_EXCEEDED); } + + @Test + public void runJedisClusterMaxAttemptsExceptionIsUnavailable() { + RedisClient client = new RedisClient(mock(JedisCluster.class)); + Status status = Status.UNKNOWN; + try { + JedisClusterOperationException jcoe = + new JedisClusterOperationException("No more cluster attempts left."); + jcoe.addSuppressed(new JedisConnectionException(new SocketException("Connection reset"))); + client.run( + jedis -> { + throw jcoe; + }); + } catch (IOException e) { + status = Status.fromThrowable(e); + } + assertThat(status.getCode()).isEqualTo(Code.UNAVAILABLE); + } } diff --git a/src/test/java/build/buildfarm/common/redis/RedisHashMapTest.java b/src/test/java/build/buildfarm/common/redis/RedisHashMapTest.java index 95a530982c..d66f00baf4 100644 --- a/src/test/java/build/buildfarm/common/redis/RedisHashMapTest.java +++ b/src/test/java/build/buildfarm/common/redis/RedisHashMapTest.java @@ -21,6 +21,7 @@ import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import org.junit.After; @@ -28,7 +29,7 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; -import 
redis.clients.jedis.JedisCluster; +import redis.clients.jedis.UnifiedJedis; /** * @class RedisHashMapTest @@ -41,7 +42,7 @@ @RunWith(JUnit4.class) public class RedisHashMapTest { private BuildfarmConfigs configs = BuildfarmConfigs.getInstance(); - private JedisCluster redis; + private UnifiedJedis redis; @Before public void setUp() throws Exception { @@ -255,4 +256,18 @@ public void redisRemoveAll() throws Exception { Map elements = map.asMap(redis); assertThat(elements.equals(expected)).isTrue(); } + + @Test + public void redisMget() { + RedisHashMap map = new RedisHashMap("test"); + map.insert(redis, "key1", "value1"); + map.insert(redis, "key2", "value2"); + map.insert(redis, "key3", "value3"); + map.insert(redis, "key4", "value4"); + + Iterable fields = Arrays.asList("key2", "key3"); + List expected = Arrays.asList("value2", "value3"); + + assertThat(map.mget(redis, fields)).containsExactlyElementsIn(expected); + } } diff --git a/src/test/java/build/buildfarm/common/redis/RedisNodeHashesMockTest.java b/src/test/java/build/buildfarm/common/redis/RedisNodeHashesMockTest.java index 3d421e6afa..e5a02f8e71 100644 --- a/src/test/java/build/buildfarm/common/redis/RedisNodeHashesMockTest.java +++ b/src/test/java/build/buildfarm/common/redis/RedisNodeHashesMockTest.java @@ -15,20 +15,29 @@ package build.buildfarm.common.redis; import static com.google.common.truth.Truth.assertThat; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.when; +import static redis.clients.jedis.Protocol.ClusterKeyword.SHARDS; +import static redis.clients.jedis.Protocol.Command.CLUSTER; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; -import redis.clients.jedis.Jedis; +import redis.clients.jedis.Connection; +import redis.clients.jedis.ConnectionPool; import redis.clients.jedis.JedisCluster; -import redis.clients.jedis.JedisPool; +import redis.clients.jedis.resps.ClusterShardInfo; +import redis.clients.jedis.util.SafeEncoder; /** * @class RedisNodeHashesMockTest @@ -46,14 +55,23 @@ public class RedisNodeHashesMockTest { @Test public void getEvenlyDistributedHashesCanRetrieveDistributedHashes() throws Exception { // ARRANGE - Jedis node = mock(Jedis.class); - when(node.clusterSlots()).thenReturn(Collections.singletonList(Arrays.asList(0L, 100L))); - - JedisPool pool = mock(JedisPool.class); - when(pool.getResource()).thenReturn(node); + Connection connection = spy(Connection.class); + doNothing().when(connection).connect(); + doNothing().when(connection).sendCommand(CLUSTER, SHARDS); + doReturn( + Arrays.asList( + Arrays.asList( + SafeEncoder.encode(ClusterShardInfo.SLOTS), Arrays.asList(0L, 100L), + SafeEncoder.encode(ClusterShardInfo.NODES), Arrays.asList()))) + .when(connection) + .getObjectMultiBulkReply(); + + ConnectionPool pool = mock(ConnectionPool.class); + when(pool.getResource()).thenReturn(connection); + connection.setHandlingPool(pool); JedisCluster redis = mock(JedisCluster.class); - Map poolMap = new HashMap<>(); + Map poolMap = new HashMap<>(); poolMap.put("key1", pool); when(redis.getClusterNodes()).thenReturn(poolMap); @@ -62,6 +80,9 @@ public 
void getEvenlyDistributedHashesCanRetrieveDistributedHashes() throws Exce // ASSERT assertThat(hashtags.isEmpty()).isFalse(); + verify(pool, times(1)).getResource(); + verify(pool, times(1)).returnResource(connection); + verifyNoMoreInteractions(pool); } // Function under test: getEvenlyDistributedHashes @@ -95,15 +116,27 @@ public void getEvenlyDistributedHashesCanConstruct() throws Exception { @Test public void getEvenlyDistributedHashesWithPrefixExpectedPrefixHashes() throws Exception { // ARRANGE - Jedis node = mock(Jedis.class); - when(node.clusterSlots()) - .thenReturn(Arrays.asList(Arrays.asList(0L, 100L), Arrays.asList(101L, 200L))); - - JedisPool pool = mock(JedisPool.class); - when(pool.getResource()).thenReturn(node); + Connection connection = spy(Connection.class); + doNothing().when(connection).connect(); + doNothing().when(connection).sendCommand(CLUSTER, SHARDS); + doReturn( + Arrays.asList( + Arrays.asList( + SafeEncoder.encode(ClusterShardInfo.SLOTS), Arrays.asList(0L, 100L), + SafeEncoder.encode(ClusterShardInfo.NODES), Arrays.asList()), + Arrays.asList( + SafeEncoder.encode(ClusterShardInfo.SLOTS), Arrays.asList(101L, 200L), + SafeEncoder.encode(ClusterShardInfo.NODES), Arrays.asList()))) + .when(connection) + .getObjectMultiBulkReply(); + doReturn(false).when(connection).isBroken(); + + ConnectionPool pool = mock(ConnectionPool.class); + when(pool.getResource()).thenReturn(connection); + connection.setHandlingPool(pool); JedisCluster redis = mock(JedisCluster.class); - Map poolMap = new HashMap<>(); + Map poolMap = new HashMap<>(); poolMap.put("key1", pool); when(redis.getClusterNodes()).thenReturn(poolMap); @@ -112,8 +145,17 @@ public void getEvenlyDistributedHashesWithPrefixExpectedPrefixHashes() throws Ex RedisNodeHashes.getEvenlyDistributedHashesWithPrefix(redis, "Execution"); // ASSERT + verify(connection, times(1)).sendCommand(CLUSTER, SHARDS); + verify(connection, times(1)).getObjectMultiBulkReply(); + verify(connection, times(1)).close(); + verify(connection, times(1)).isBroken(); + verify(connection, times(1)).setHandlingPool(pool); + verifyNoMoreInteractions(connection); assertThat(hashtags.size()).isEqualTo(2); assertThat(hashtags.get(0)).isEqualTo("Execution:97"); assertThat(hashtags.get(1)).isEqualTo("Execution:66"); + verify(pool, times(1)).getResource(); + verify(pool, times(1)).returnResource(connection); + verifyNoMoreInteractions(pool); } } diff --git a/src/test/java/build/buildfarm/common/redis/RedisPriorityQueueMockTest.java b/src/test/java/build/buildfarm/common/redis/RedisPriorityQueueMockTest.java index 300dec24a5..b2a9981e55 100644 --- a/src/test/java/build/buildfarm/common/redis/RedisPriorityQueueMockTest.java +++ b/src/test/java/build/buildfarm/common/redis/RedisPriorityQueueMockTest.java @@ -16,11 +16,14 @@ import static com.google.common.truth.Truth.assertThat; import static org.mockito.Mockito.any; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import build.buildfarm.common.StringVisitor; +import java.time.Clock; +import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -32,7 +35,7 @@ import org.junit.runners.JUnit4; import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import redis.clients.jedis.JedisCluster; +import redis.clients.jedis.Jedis; /** * @class RedisPriorityQueueMockTest @@ -45,8 +48,8 @@ */ @RunWith(JUnit4.class) public class RedisPriorityQueueMockTest { - 
@Mock private JedisCluster redis; - @Mock private Timestamp time; + @Mock private Jedis redis; + @Mock private Clock clock; @Before public void setUp() { @@ -59,115 +62,113 @@ public void setUp() { @Test public void redisPriorityQueueConstructsWithoutError() throws Exception { // ACT - new RedisPriorityQueue("test"); + new RedisPriorityQueue(redis, "test"); } - // Function under test: push - // Reason for testing: the queue can have a value pushed onto it - // Failure explanation: the queue is throwing an exception upon push + // Function under test: offer + // Reason for testing: the queue can have a value offered to it + // Failure explanation: the queue is throwing an exception upon offer @Test - public void pushPushWithoutError() throws Exception { + public void offerOfferWithoutError() throws Exception { // ARRANGE - when(time.getNanos()).thenReturn(123L); - RedisPriorityQueue queue = new RedisPriorityQueue("test", time); + when(clock.millis()).thenReturn(123L); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test", clock); // ACT - queue.push(redis, "foo"); + queue.offer("foo"); // ASSERT verify(redis, times(1)).zadd("test", 0, "123:foo"); } - // Function under test: push - // Reason for testing: the queue can have the different values pushed onto it - // Failure explanation: the queue is throwing an exception upon pushing different values + // Function under test: offer + // Reason for testing: the queue can have the different values offered onto it + // Failure explanation: the queue is throwing an exception upon offering different values @Test - public void pushPushDifferentWithoutError() throws Exception { + public void offerOfferDifferentWithoutError() throws Exception { // ARRANGE - when(time.getNanos()).thenReturn(123L, 124L); - RedisPriorityQueue queue = new RedisPriorityQueue("test", time); + when(clock.millis()).thenReturn(123L, 124L); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test", clock); // ACT - queue.push(redis, "foo"); - queue.push(redis, "bar"); + queue.offer("foo"); + queue.offer("bar"); // ASSERT verify(redis, times(1)).zadd("test", 0, "123:foo"); verify(redis, times(1)).zadd("test", 0, "124:bar"); } - // Function under test: push - // Reason for testing: the queue can have the same values pushed onto it - // Failure explanation: the queue is throwing an exception upon pushing the same values + // Function under test: offer + // Reason for testing: the queue can have the same values offered to it + // Failure explanation: the queue is throwing an exception upon offering the same values @Test - public void pushPushSameWithoutError() throws Exception { + public void offerOfferSameWithoutError() throws Exception { // ARRANGE - when(time.getNanos()).thenReturn(123L, 124L); - RedisPriorityQueue queue = new RedisPriorityQueue("test", time); + when(clock.millis()).thenReturn(123L, 124L); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test", clock); // ACT - queue.push(redis, "foo"); - queue.push(redis, "foo"); + queue.offer("foo"); + queue.offer("foo"); // ASSERT verify(redis, times(1)).zadd("test", 0, "123:foo"); verify(redis, times(1)).zadd("test", 0, "124:foo"); } - // Function under test: push - // Reason for testing: the queue can have the same values pushed onto it - // Failure explanation: the queue is throwing an exception upon pushing the same values + // Function under test: offer + // Reason for testing: the queue can have the same values offered to it + // Failure explanation: the queue throws an exception when 
offered the same values @Test - public void pushPushPriorityWithoutError() throws Exception { + public void offerOfferPriorityWithoutError() throws Exception { // ARRANGE - when(time.getNanos()).thenReturn(123L, 124L); - RedisPriorityQueue queue = new RedisPriorityQueue("test", time); + when(clock.millis()).thenReturn(123L, 124L); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test", clock); // ACT - queue.push(redis, "foo", 1); - queue.push(redis, "foo2", 2); + queue.offer("foo", 1); + queue.offer("foo2", 2); // ASSERT verify(redis, times(1)).zadd("test", 1, "123:foo"); verify(redis, times(1)).zadd("test", 2, "124:foo2"); } - // Function under test: push - // Reason for testing: the queue can have many values pushed into it - // Failure explanation: the queue is throwing an exception upon pushing many values + // Function under test: offer + // Reason for testing: the queue can have many values offered to it + // Failure explanation: the queue throws an exception when offered many values @Test - public void pushPushMany() throws Exception { + public void offerMany() throws Exception { // ARRANGE - when(time.getNanos()).thenReturn(123L); - RedisPriorityQueue queue = new RedisPriorityQueue("test", time); + when(clock.millis()).thenReturn(123L); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test", clock); // ACT for (int i = 0; i < 1000; ++i) { - queue.push(redis, "foo" + i); + queue.offer("foo" + i); } // ASSERT - for (int i = 0; i < 1000; ++i) { - verify(redis, times(1)).zadd("test", 0, "123:foo" + i); - } + verify(redis, times(1000)).zadd(eq("test"), eq(0.0), any(String.class)); } - // Function under test: push - // Reason for testing: the queue size increases as elements are pushed - // Failure explanation: the queue size is not accurately reflecting the pushes + // Function under test: offer + // Reason for testing: the queue size increases as elements are offered + // Failure explanation: the queue size does not reflect the offerings @Test - public void pushCallsLPush() throws Exception { + public void offerCallsZAdd() throws Exception { // ARRANGE - when(time.getNanos()).thenReturn(123L, 124L, 125L); - RedisPriorityQueue queue = new RedisPriorityQueue("test", time); + when(clock.millis()).thenReturn(123L, 124L, 125L); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test", clock); // ACT - queue.push(redis, "foo", 0); - queue.push(redis, "foo1", 2); - queue.push(redis, "foo2", 2); + queue.offer("foo", 0); + queue.offer("foo1", 2); + queue.offer("foo2", 2); // ASSERT - verify(time, times(3)).getNanos(); + verify(clock, times(3)).millis(); verify(redis, times(1)).zadd("test", 0, "123:foo"); verify(redis, times(1)).zadd("test", 2, "124:foo1"); verify(redis, times(1)).zadd("test", 2, "125:foo2"); @@ -181,120 +182,87 @@ public void pushCallsLPush() throws Exception { public void removeFromDequeueRemoveADequeueValue() throws Exception { // ARRANGE when(redis.lrem("test_dequeue", -1, "foo")).thenReturn(1L); - RedisPriorityQueue queue = new RedisPriorityQueue("test"); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test"); // ACT - boolean wasRemoved = queue.removeFromDequeue(redis, "foo"); + boolean wasRemoved = queue.removeFromDequeue("foo"); // ASSERT assertThat(wasRemoved).isTrue(); verify(redis, times(1)).lrem("test_dequeue", -1, "foo"); } - // Function under test: dequeue - // Reason for testing: the element is able to be dequeued - // Failure explanation: something prevented the element from being dequeued + // Function under test: take 
+ // Reason for testing: the element is able to be taken + // Failure explanation: something prevented the element from being taken @Test public void dequeueElementCanBeDequeuedWithTimeout() throws Exception { // ARRANGE when(redis.eval(any(String.class), any(List.class), any(List.class))).thenReturn("foo"); - RedisPriorityQueue queue = new RedisPriorityQueue("test"); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test"); // ACT - queue.push(redis, "foo"); - String val = queue.dequeue(redis, 1); + String val = queue.take(Duration.ofSeconds(1)); // ASSERT assertThat(val).isEqualTo("foo"); } - // Function under test: dequeue - // Reason for testing: element is not dequeued - // Failure explanation: element was dequeued + // Function under test: take + // Reason for testing: element is not taken + // Failure explanation: element was taken @Test public void dequeueElementIsNotDequeuedIfTimeRunsOut() throws Exception { // ARRANGE when(redis.eval(any(String.class), any(List.class), any(List.class))).thenReturn(null); - RedisPriorityQueue queue = new RedisPriorityQueue("test"); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test"); // ACT - queue.push(redis, "foo"); - String val = queue.dequeue(redis, 5); + String val = queue.take(Duration.ofMillis(100)); // ASSERT assertThat(val).isEqualTo(null); } - // Function under test: dequeue - // Reason for testing: the dequeue is interrupted - // Failure explanation: the dequeue was not interrupted as expected + // Function under test: take + // Reason for testing: the take is interrupted + // Failure explanation: the take was not interrupted as expected @Test public void dequeueInterrupt() throws Exception { // ARRANGE when(redis.eval(any(String.class), any(List.class), any(List.class))).thenReturn(null); - RedisPriorityQueue queue = new RedisPriorityQueue("test"); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test"); // ACT - queue.push(redis, "foo"); Thread call = new Thread( () -> { try { - queue.dequeue(redis, 100000); + queue.take(Duration.ofDays(1)); } catch (Exception e) { } }); call.start(); call.interrupt(); + call.join(); } - // Function under test: nonBlockingDequeue - // Reason for testing: the element is able to be dequeued - // Failure explanation: something prevented the element from being dequeued + // Function under test: poll + // Reason for testing: the element is able to be polled + // Failure explanation: something prevented the element from being polled @Test public void nonBlockingDequeueElementCanBeDequeued() throws Exception { // ARRANGE when(redis.eval(any(String.class), any(List.class), any(List.class))).thenReturn("foo"); - RedisPriorityQueue queue = new RedisPriorityQueue("test"); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test"); // ACT - queue.push(redis, "foo"); - String val = queue.nonBlockingDequeue(redis); + String val = queue.poll(); // ASSERT assertThat(val).isEqualTo("foo"); } - // Function under test: getName - // Reason for testing: the name can be received - // Failure explanation: name does not match what it should - @Test - public void getNameNameIsStored() throws Exception { - // ARRANGE - RedisPriorityQueue queue = new RedisPriorityQueue("queue_name"); - - // ACT - String name = queue.getName(); - - // ASSERT - assertThat(name).isEqualTo("queue_name"); - } - - // Function under test: getDequeueName - // Reason for testing: the name can be received - // Failure explanation: name does not match what it should - @Test - public void 
getDequeueNameNameIsStored() throws Exception { - // ARRANGE - RedisPriorityQueue queue = new RedisPriorityQueue("queue_name"); - - // ACT - String name = queue.getDequeueName(); - - // ASSERT - assertThat(name).isEqualTo("queue_name_dequeue"); - } - // Function under test: visit // Reason for testing: each element in the queue can be visited // Failure explanation: we are unable to visit each element in the queue @@ -312,18 +280,18 @@ public void visitCheckVisitOfEachElement() throws Exception { "element 6", "element 7", "element 8") - .collect(Collectors.toSet())); + .collect(Collectors.toList())); // ARRANGE - RedisPriorityQueue queue = new RedisPriorityQueue("test"); - queue.push(redis, "element 1"); - queue.push(redis, "element 2"); - queue.push(redis, "element 3"); - queue.push(redis, "element 4"); - queue.push(redis, "element 5"); - queue.push(redis, "element 6"); - queue.push(redis, "element 7"); - queue.push(redis, "element 8"); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test"); + queue.offer("element 1"); + queue.offer("element 2"); + queue.offer("element 3"); + queue.offer("element 4"); + queue.offer("element 5"); + queue.offer("element 6"); + queue.offer("element 7"); + queue.offer("element 8"); // ACT List visited = new ArrayList<>(); @@ -333,7 +301,7 @@ public void visit(String entry) { visited.add(entry); } }; - queue.visit(redis, visitor); + queue.visit(visitor); // ASSERT assertThat(visited.size()).isEqualTo(8); @@ -366,7 +334,7 @@ public void visitDequeueCheckVisitOfEachElement() throws Exception { "element 8")); // ARRANGE - RedisPriorityQueue queue = new RedisPriorityQueue("test"); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test"); // ACT List visited = new ArrayList<>(); @@ -376,7 +344,7 @@ public void visit(String entry) { visited.add(entry); } }; - queue.visitDequeue(redis, visitor); + queue.visitDequeue(visitor); // ASSERT assertThat(visited.size()).isEqualTo(8); diff --git a/src/test/java/build/buildfarm/common/redis/RedisPriorityQueueTest.java b/src/test/java/build/buildfarm/common/redis/RedisPriorityQueueTest.java index 0f507c3fb6..2845eaa554 100644 --- a/src/test/java/build/buildfarm/common/redis/RedisPriorityQueueTest.java +++ b/src/test/java/build/buildfarm/common/redis/RedisPriorityQueueTest.java @@ -15,20 +15,27 @@ package build.buildfarm.common.redis; import static com.google.common.truth.Truth.assertThat; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verifyNoInteractions; import build.buildfarm.common.StringVisitor; import build.buildfarm.common.config.BuildfarmConfigs; import build.buildfarm.instance.shard.JedisClusterFactory; +import com.google.common.base.Stopwatch; +import com.google.common.collect.ImmutableList; import java.time.Duration; -import java.time.Instant; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.ExecutorService; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; -import redis.clients.jedis.JedisCluster; +import redis.clients.jedis.Jedis; +import redis.clients.jedis.JedisPooled; +import redis.clients.jedis.UnifiedJedis; /** * @class RedisPriorityQueueTest @@ -42,125 +49,115 @@ @RunWith(JUnit4.class) public class RedisPriorityQueueTest { private BuildfarmConfigs configs = BuildfarmConfigs.getInstance(); - private JedisCluster redis; + private JedisPooled pooled; + private Jedis redis; @Before public 
void setUp() throws Exception { configs.getBackplane().setRedisUri("redis://localhost:6379"); - redis = JedisClusterFactory.createTest(); + UnifiedJedis unified = JedisClusterFactory.createTest(); + assertThat(unified).isInstanceOf(JedisPooled.class); + pooled = (JedisPooled) unified; + redis = new Jedis(pooled.getPool().getResource()); } @After public void tearDown() { redis.close(); + pooled.close(); } // Function under test: RedisPriorityQueue // Reason for testing: the queue can be constructed with a valid cluster instance and name - // Failure explanation: the queue is throwing an exception upon construction + // Failure explanation: the queue throws an exception upon construction @Test public void redisPriorityQueueConstructsWithoutError() throws Exception { // ACT - new RedisPriorityQueue("test"); + new RedisPriorityQueue(redis, "test"); } - // Function under test: push - // Reason for testing: the queue can have a value pushed onto it - // Failure explanation: the queue is throwing an exception upon push + // Function under test: offer + // Reason for testing: the queue can have a value offered to it + // Failure explanation: the queue throws an exception upon offer @Test - public void pushPushWithoutError() throws Exception { + public void offerWithoutError() throws Exception { // ARRANGE - RedisPriorityQueue queue = new RedisPriorityQueue("test"); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test"); // ACT - queue.push(redis, "foo"); + queue.offer("foo"); } - // Function under test: push - // Reason for testing: the queue can have the different values pushed onto it - // Failure explanation: the queue is throwing an exception upon pushing different values + // Function under test: offer + // Reason for testing: the queue can have the different values offered to it + // Failure explanation: the queue throws an exception upon offering different values @Test - public void pushPushDifferentWithoutError() throws Exception { + public void offerDifferentWithoutError() throws Exception { // ARRANGE - RedisPriorityQueue queue = new RedisPriorityQueue("test"); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test"); // ACT - queue.push(redis, "foo"); - queue.push(redis, "bar"); + queue.offer("foo"); + queue.offer("bar"); } - // Function under test: push - // Reason for testing: the queue can have the same values pushed onto it - // Failure explanation: the queue is throwing an exception upon pushing the same values + // Function under test: offer + // Reason for testing: the queue can have the same values offered to it + // Failure explanation: the queue throws an exception upon offering the same values @Test public void pushPushSameWithoutError() throws Exception { // ARRANGE - RedisPriorityQueue queue = new RedisPriorityQueue("test"); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test"); // ACT - queue.push(redis, "foo"); - queue.push(redis, "foo"); + queue.offer("foo"); + queue.offer("foo"); } - // Function under test: push - // Reason for testing: the queue can have many values pushed into it - // Failure explanation: the queue is throwing an exception upon pushing many values + // Function under test: offer + // Reason for testing: the queue can have many values offered to it + // Failure explanation: the queue throws an exception upon offering many values @Test public void pushPushMany() throws Exception { // ARRANGE - RedisPriorityQueue queue = new RedisPriorityQueue("test"); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test"); 
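(Aside on the setUp() pattern above: JedisClusterFactory.createTest() now returns a UnifiedJedis, and the test borrows a raw connection from the underlying pool so a plain Jedis handle can drive the queue. A minimal, self-contained sketch of that unwrapping, assuming the Jedis 5.x API these tests already use; the localhost address mirrors the test configuration and is an assumption of this sketch:

import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPooled;
import redis.clients.jedis.UnifiedJedis;

public class PooledConnectionSketch {
  public static void main(String[] args) {
    // JedisPooled is a UnifiedJedis backed by a connection pool.
    UnifiedJedis unified = new JedisPooled("localhost", 6379);
    JedisPooled pooled = (JedisPooled) unified; // safe: constructed as JedisPooled above
    // Borrow one pooled connection and wrap it; closing the Jedis handle
    // returns the borrowed connection rather than shutting down the pool.
    try (Jedis jedis = new Jedis(pooled.getPool().getResource())) {
      jedis.ping();
    }
    pooled.close();
  }
}

Closing the wrapping Jedis in tearDown() before closing the pool, as the test does, follows the same discipline.)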
// ACT for (int i = 0; i < 1000; ++i) { - queue.push(redis, "foo" + i); + queue.offer("foo" + i); } } - // Function under test: push - // Reason for testing: the queue size increases as elements are pushed - // Failure explanation: the queue size is not accurately reflecting the pushes + // Function under test: offer + // Reason for testing: the queue size increases as elements are offered + // Failure explanation: the queue size does not reflect offers @Test public void pushPushIncreasesSize() throws Exception { // ARRANGE - RedisPriorityQueue queue = new RedisPriorityQueue("test"); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test"); // ACT / ASSERT - assertThat(queue.size(redis)).isEqualTo(0); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(1); - queue.push(redis, "foo1"); - assertThat(queue.size(redis)).isEqualTo(2); - queue.push(redis, "foo2"); - assertThat(queue.size(redis)).isEqualTo(3); - queue.push(redis, "foo3"); - assertThat(queue.size(redis)).isEqualTo(4); - queue.push(redis, "foo4"); - assertThat(queue.size(redis)).isEqualTo(5); - queue.push(redis, "foo5"); - assertThat(queue.size(redis)).isEqualTo(6); - queue.push(redis, "foo6"); - assertThat(queue.size(redis)).isEqualTo(7); - queue.push(redis, "foo7"); - assertThat(queue.size(redis)).isEqualTo(8); - queue.push(redis, "foo8"); - assertThat(queue.size(redis)).isEqualTo(9); - queue.push(redis, "foo9"); - assertThat(queue.size(redis)).isEqualTo(10); - } - - // Function under test: getName - // Reason for testing: the name can be received - // Failure explanation: name does not match what it should - @Test - public void getNameNameIsStored() throws Exception { - // ARRANGE - RedisPriorityQueue queue = new RedisPriorityQueue("queue_name"); - - // ACT - String name = queue.getName(); - - // ASSERT - assertThat(name).isEqualTo("queue_name"); + assertThat(queue.size()).isEqualTo(0); + queue.offer("foo"); + assertThat(queue.size()).isEqualTo(1); + queue.offer("foo1"); + assertThat(queue.size()).isEqualTo(2); + queue.offer("foo2"); + assertThat(queue.size()).isEqualTo(3); + queue.offer("foo3"); + assertThat(queue.size()).isEqualTo(4); + queue.offer("foo4"); + assertThat(queue.size()).isEqualTo(5); + queue.offer("foo5"); + assertThat(queue.size()).isEqualTo(6); + queue.offer("foo6"); + assertThat(queue.size()).isEqualTo(7); + queue.offer("foo7"); + assertThat(queue.size()).isEqualTo(8); + queue.offer("foo8"); + assertThat(queue.size()).isEqualTo(9); + queue.offer("foo9"); + assertThat(queue.size()).isEqualTo(10); } // Function under test: getDequeueName @@ -169,7 +166,7 @@ public void getNameNameIsStored() throws Exception { @Test public void getDequeueNameNameIsStored() throws Exception { // ARRANGE - RedisPriorityQueue queue = new RedisPriorityQueue("queue_name"); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "queue_name"); // ACT String name = queue.getDequeueName(); @@ -179,81 +176,79 @@ public void getDequeueNameNameIsStored() throws Exception { }
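(Before the size and priority tests below, it helps to restate the storage model that the RedisPriorityQueueMockTest changes earlier in this patch pin down: offer() writes each element into a sorted set with zadd, scored by its priority, and prefixes the member with a millisecond timestamp so that equal-priority elements drain in roughly insertion order, since Redis orders equal-score members lexicographically. A rough sketch under those assumptions; the class and field names are illustrative, not the buildfarm implementation:

import java.time.Clock;
import redis.clients.jedis.Jedis;

class PrioritySketch {
  private final Jedis jedis;
  private final String name;
  private final Clock clock;

  PrioritySketch(Jedis jedis, String name, Clock clock) {
    this.jedis = jedis;
    this.name = name;
    this.clock = clock;
  }

  // Mirrors the mocked expectation zadd("test", priority, "123:foo"): the
  // score orders elements across priorities; the timestamp prefix orders
  // them within a single priority.
  void offer(String value, double priority) {
    jedis.zadd(name, priority, clock.millis() + ":" + value);
  }
}

This is why the priority tests below can offer interleaved priorities and still assert a deterministic drain order.)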
// Function under test: size - // Reason for testing: size adjusts with push and dequeue + // Reason for testing: size adjusts with offer and take // Failure explanation: size is incorrectly reporting the expected queue size @Test public void sizeAdjustPushDequeue() throws Exception { // ARRANGE - RedisPriorityQueue queue = new RedisPriorityQueue("test"); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test"); + ExecutorService service = mock(ExecutorService.class); + Duration timeout = Duration.ofSeconds(1); // ACT / ASSERT - assertThat(queue.size(redis)).isEqualTo(0); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(1); - queue.push(redis, "bar"); - assertThat(queue.size(redis)).isEqualTo(2); - queue.push(redis, "baz"); - assertThat(queue.size(redis)).isEqualTo(3); - queue.push(redis, "baz2"); - assertThat(queue.size(redis)).isEqualTo(4); - queue.push(redis, "baz3"); - assertThat(queue.size(redis)).isEqualTo(5); - queue.push(redis, "baz4"); - assertThat(queue.size(redis)).isEqualTo(6); - queue.dequeue(redis, 1); - assertThat(queue.size(redis)).isEqualTo(5); - queue.dequeue(redis, 1); - assertThat(queue.size(redis)).isEqualTo(4); - queue.dequeue(redis, 1); - assertThat(queue.size(redis)).isEqualTo(3); - queue.dequeue(redis, 1); - assertThat(queue.size(redis)).isEqualTo(2); - queue.dequeue(redis, 1); - assertThat(queue.size(redis)).isEqualTo(1); - queue.dequeue(redis, 1); - assertThat(queue.size(redis)).isEqualTo(0); + assertThat(queue.size()).isEqualTo(0); + queue.offer("foo"); + assertThat(queue.size()).isEqualTo(1); + queue.offer("bar"); + assertThat(queue.size()).isEqualTo(2); + queue.offer("baz"); + assertThat(queue.size()).isEqualTo(3); + queue.offer("baz2"); + assertThat(queue.size()).isEqualTo(4); + queue.offer("baz3"); + assertThat(queue.size()).isEqualTo(5); + queue.offer("baz4"); + assertThat(queue.size()).isEqualTo(6); + queue.take(timeout); + assertThat(queue.size()).isEqualTo(5); + queue.take(timeout); + assertThat(queue.size()).isEqualTo(4); + queue.take(timeout); + assertThat(queue.size()).isEqualTo(3); + queue.take(timeout); + assertThat(queue.size()).isEqualTo(2); + queue.take(timeout); + assertThat(queue.size()).isEqualTo(1); + queue.take(timeout); + assertThat(queue.size()).isEqualTo(0); + verifyNoInteractions(service); } // Function under test: size - // Reason for testing: size adjusts with push and dequeue + // Reason for testing: size adjusts with offer and take // Failure explanation: size is incorrectly reporting the expected queue size @Test public void checkPriorityOnDequeue() throws Exception { // ARRANGE - RedisPriorityQueue queue = new RedisPriorityQueue("test"); - String val; + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test"); + ExecutorService service = mock(ExecutorService.class); + Duration timeout = Duration.ofSeconds(1); // ACT / ASSERT - assertThat(queue.size(redis)).isEqualTo(0); - queue.push(redis, "foo", 2); - assertThat(queue.size(redis)).isEqualTo(1); - queue.push(redis, "bar", 1); - assertThat(queue.size(redis)).isEqualTo(2); - queue.push(redis, "baz", 3); - assertThat(queue.size(redis)).isEqualTo(3); - queue.push(redis, "baz2", 1); - assertThat(queue.size(redis)).isEqualTo(4); - queue.push(redis, "baz3", 2); - assertThat(queue.size(redis)).isEqualTo(5); - queue.push(redis, "baz4", 1); - assertThat(queue.size(redis)).isEqualTo(6); - val = queue.dequeue(redis, 1); - assertThat(val).isEqualTo("bar"); - assertThat(queue.size(redis)).isEqualTo(5); - val = queue.dequeue(redis, 1); - assertThat(val).isEqualTo("baz2"); - assertThat(queue.size(redis)).isEqualTo(4); - val = queue.dequeue(redis, 1); - assertThat(val).isEqualTo("baz4"); - assertThat(queue.size(redis)).isEqualTo(3); - val = queue.dequeue(redis, 1); - assertThat(val).isEqualTo("foo"); - assertThat(queue.size(redis)).isEqualTo(2); - val = queue.dequeue(redis, 1); - assertThat(val).isEqualTo("baz3"); - assertThat(queue.size(redis)).isEqualTo(1); - val = queue.dequeue(redis, 1); - assertThat(val).isEqualTo("baz"); - assertThat(queue.size(redis)).isEqualTo(0); +
assertThat(queue.size()).isEqualTo(0); + queue.offer("prio_2_1", 2); + assertThat(queue.size()).isEqualTo(1); + queue.offer("prio_1_1", 1); + assertThat(queue.size()).isEqualTo(2); + queue.offer("prio_3_1", 3); + assertThat(queue.size()).isEqualTo(3); + queue.offer("prio_1_2", 1); + assertThat(queue.size()).isEqualTo(4); + queue.offer("prio_2_2", 2); + assertThat(queue.size()).isEqualTo(5); + queue.offer("prio_1_3", 1); + assertThat(queue.size()).isEqualTo(6); + // priority 1 + assertThat(ImmutableList.of(queue.take(timeout), queue.take(timeout), queue.take(timeout))) + .containsExactly("prio_1_1", "prio_1_2", "prio_1_3"); + assertThat(queue.size()).isEqualTo(3); + // priority 2 + assertThat(ImmutableList.of(queue.take(timeout), queue.take(timeout))) + .containsExactly("prio_2_1", "prio_2_2"); + assertThat(queue.size()).isEqualTo(1); + // priority 3 + assertThat(ImmutableList.of(queue.take(timeout))).containsExactly("prio_3_1"); + assertThat(queue.size()).isEqualTo(0); + verifyNoInteractions(service); } // Function under test: dequeue @@ -262,15 +257,16 @@ public void checkPriorityOnDequeue() throws Exception { @Test public void checkDequeueTimeout() throws Exception { // ARRANGE - RedisPriorityQueue queue = new RedisPriorityQueue("test"); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test"); + ExecutorService service = mock(ExecutorService.class); - Instant start = Instant.now(); - String val = queue.dequeue(redis, 1); - Instant finish = Instant.now(); + Stopwatch stopwatch = Stopwatch.createStarted(); + String val = queue.take(Duration.ofSeconds(1)); + long timeElapsed = stopwatch.elapsed(MILLISECONDS); - long timeElapsed = Duration.between(start, finish).toMillis(); assertThat(timeElapsed).isGreaterThan(1000L); assertThat(val).isEqualTo(null); + verifyNoInteractions(service); } // Function under test: dequeue @@ -279,35 +275,38 @@ public void checkDequeueTimeout() throws Exception { @Test public void checkNegativesInPriority() throws Exception { // ARRANGE - RedisPriorityQueue queue = new RedisPriorityQueue("test"); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test"); + ExecutorService service = mock(ExecutorService.class); + Duration timeout = Duration.ofSeconds(1); String val; // ACT / ASSERT - queue.push(redis, "foo-6", 6); - queue.push(redis, "foo-5", 5); - queue.push(redis, "foo-3", 3); - queue.push(redis, "negative-50", -50); - queue.push(redis, "negative-1", -1); - queue.push(redis, "foo-1", 1); - queue.push(redis, "baz-2", 2); - queue.push(redis, "foo-4", 4); - - val = queue.dequeue(redis, 1); + queue.offer("foo-6", 6); + queue.offer("foo-5", 5); + queue.offer("foo-3", 3); + queue.offer("negative-50", -50); + queue.offer("negative-1", -1); + queue.offer("foo-1", 1); + queue.offer("baz-2", 2); + queue.offer("foo-4", 4); + + val = queue.take(timeout); assertThat(val).isEqualTo("negative-50"); - val = queue.dequeue(redis, 1); + val = queue.take(timeout); assertThat(val).isEqualTo("negative-1"); - val = queue.dequeue(redis, 1); + val = queue.take(timeout); assertThat(val).isEqualTo("foo-1"); - val = queue.dequeue(redis, 1); + val = queue.take(timeout); assertThat(val).isEqualTo("baz-2"); - val = queue.dequeue(redis, 1); + val = queue.take(timeout); assertThat(val).isEqualTo("foo-3"); - val = queue.dequeue(redis, 1); + val = queue.take(timeout); assertThat(val).isEqualTo("foo-4"); - val = queue.dequeue(redis, 1); + val = queue.take(timeout); assertThat(val).isEqualTo("foo-5"); - val = queue.dequeue(redis, 1); + val = queue.take(timeout); 
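(The ordering asserted in this test follows directly from sorted-set semantics: scores sort ascending, so a -50 priority drains before -1, which drains before 1. The take() under test evaluates a Lua script, which is why the mock tests stub redis.eval; a non-atomic conceptual equivalent might look like the sketch below, where the "_dequeue" suffix matches the dequeue list these tests already exercise and the null-on-empty behavior is an assumption of this sketch:

import redis.clients.jedis.Jedis;
import redis.clients.jedis.resps.Tuple;

class TakeSketch {
  private final Jedis jedis;
  private final String name;

  TakeSketch(Jedis jedis, String name) {
    this.jedis = jedis;
    this.name = name;
  }

  // Pop the lowest-scored member, strip the "timestamp:" prefix, and record
  // the raw value on the dequeue list. Doing this as separate commands races
  // with other consumers; the production Lua script exists precisely to make
  // the pop-and-record step atomic.
  String takeOnce() {
    Tuple popped = jedis.zpopmin(name);
    if (popped == null) {
      return null; // queue empty
    }
    String member = popped.getElement();
    String value = member.substring(member.indexOf(':') + 1);
    jedis.lpush(name + "_dequeue", value);
    return value;
  }
}

End of aside; the test continues with its final assertion.)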
assertThat(val).isEqualTo("foo-6"); + verifyNoInteractions(service); } // Function under test: visit @@ -316,15 +315,15 @@ public void checkNegativesInPriority() throws Exception { @Test public void visitCheckVisitOfEachElement() throws Exception { // ARRANGE - RedisPriorityQueue queue = new RedisPriorityQueue("test"); - queue.push(redis, "element 1"); - queue.push(redis, "element 2"); - queue.push(redis, "element 3"); - queue.push(redis, "element 4"); - queue.push(redis, "element 5"); - queue.push(redis, "element 6"); - queue.push(redis, "element 7"); - queue.push(redis, "element 8"); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test"); + queue.offer("element 1"); + queue.offer("element 2"); + queue.offer("element 3"); + queue.offer("element 4"); + queue.offer("element 5"); + queue.offer("element 6"); + queue.offer("element 7"); + queue.offer("element 8"); // ACT List visited = new ArrayList<>(); @@ -334,7 +333,7 @@ public void visit(String entry) { visited.add(entry); } }; - queue.visit(redis, visitor); + queue.visit(visitor); // ASSERT assertThat(visited.size()).isEqualTo(8); @@ -354,9 +353,9 @@ public void visit(String entry) { @Test public void visitVisitManyOverPageSize() throws Exception { // ARRANGE - RedisPriorityQueue queue = new RedisPriorityQueue("test"); + RedisPriorityQueue queue = new RedisPriorityQueue(redis, "test"); for (int i = 0; i < 2500; ++i) { - queue.push(redis, "foo" + i); + queue.offer("foo" + i); } // ACT @@ -367,7 +366,7 @@ public void visit(String entry) { visited.add(entry); } }; - queue.visit(redis, visitor); + queue.visit(visitor); // ASSERT assertThat(visited.size()).isEqualTo(2500); diff --git a/src/test/java/build/buildfarm/common/redis/RedisQueueMockTest.java b/src/test/java/build/buildfarm/common/redis/RedisQueueMockTest.java index c0cf68e61b..947b5cef1a 100644 --- a/src/test/java/build/buildfarm/common/redis/RedisQueueMockTest.java +++ b/src/test/java/build/buildfarm/common/redis/RedisQueueMockTest.java @@ -14,355 +14,146 @@ package build.buildfarm.common.redis; -import static com.google.common.truth.Truth.assertThat; -import static org.mockito.Mockito.any; +import static com.google.common.collect.Iterables.partition; +import static com.google.common.collect.Lists.newArrayList; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static redis.clients.jedis.args.ListDirection.LEFT; +import static redis.clients.jedis.args.ListDirection.RIGHT; import build.buildfarm.common.StringVisitor; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; +import com.google.common.collect.ImmutableList; +import java.time.Duration; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import redis.clients.jedis.JedisCluster; - -/** - * @class RedisQueueMockTest - * @brief tests A redis queue. - * @details A redis queue is an implementation of a queue data structure which internally uses redis - * to store and distribute the data. Its important to know that the lifetime of the queue - * persists before and after the queue data structure is created (since it exists in redis). - * Therefore, two redis queues with the same name, would in fact be the same underlying redis - * queue. 
- */ +import redis.clients.jedis.Jedis; + @RunWith(JUnit4.class) public class RedisQueueMockTest { - @Mock private JedisCluster redis; + @Mock private Jedis redis; @Before public void setUp() { MockitoAnnotations.initMocks(this); } - // Function under test: redisQueue - // Reason for testing: the queue can be constructed with a valid cluster instance and name - // Failure explanation: the queue is throwing an exception upon construction @Test - public void redisQueueConstructsWithoutError() throws Exception { - // ACT - new RedisQueue("test"); + public void decorateSucceeds() { + RedisQueue.decorate(redis, "test"); } - // Function under test: push - // Reason for testing: the queue can have a value pushed onto it - // Failure explanation: the queue is throwing an exception upon push @Test - public void pushPushWithoutError() throws Exception { - // ARRANGE - RedisQueue queue = new RedisQueue("test"); + public void offerShouldLPush() { + RedisQueue queue = new RedisQueue(redis, "test"); - // ACT - queue.push(redis, "foo"); + queue.offer("foo"); - // ASSERT verify(redis, times(1)).lpush("test", "foo"); } - // Function under test: push - // Reason for testing: the queue can have the different values pushed onto it - // Failure explanation: the queue is throwing an exception upon pushing different values @Test - public void pushPushDifferentWithoutError() throws Exception { - // ARRANGE - RedisQueue queue = new RedisQueue("test"); + public void removeFromDequeueShouldLRemTail() { + RedisQueue queue = new RedisQueue(redis, "test"); - // ACT - queue.push(redis, "foo"); - queue.push(redis, "bar"); + queue.removeFromDequeue("foo"); - // ASSERT - verify(redis, times(1)).lpush("test", "foo"); - verify(redis, times(1)).lpush("test", "bar"); + verify(redis, times(1)).lrem(queue.getDequeueName(), -1, "foo"); } - // Function under test: push - // Reason for testing: the queue can have the same values pushed onto it - // Failure explanation: the queue is throwing an exception upon pushing the same values @Test - public void pushPushSameWithoutError() throws Exception { - // ARRANGE - RedisQueue queue = new RedisQueue("test"); + public void removeAllShouldLRemAll() { + RedisQueue queue = new RedisQueue(redis, "test"); - // ACT - queue.push(redis, "foo"); - queue.push(redis, "foo"); + queue.removeAll("foo"); - // ASSERT - verify(redis, times(2)).lpush("test", "foo"); + verify(redis, times(1)).lrem("test", 0, "foo"); } - // Function under test: push - // Reason for testing: the queue can have many values pushed into it - // Failure explanation: the queue is throwing an exception upon pushing many values @Test - public void pushPushMany() throws Exception { - // ARRANGE - RedisQueue queue = new RedisQueue("test"); - - // ACT - for (int i = 0; i < 1000; ++i) { - queue.push(redis, "foo" + i); - } + public void takeShouldBLMove() { + RedisQueue queue = new RedisQueue(redis, "test"); - // ASSERT - for (int i = 0; i < 1000; ++i) { - verify(redis, times(1)).lpush("test", "foo" + i); - } - } + queue.take(Duration.ofMillis(1470)); - // Function under test: push - // Reason for testing: the queue size increases as elements are pushed - // Failure explanation: the queue size is not accurately reflecting the pushes - @Test - public void pushCallsLPush() throws Exception { - // ARRANGE - RedisQueue queue = new RedisQueue("test"); - - // ACT - queue.push(redis, "foo"); - queue.push(redis, "foo"); - queue.push(redis, "foo"); - queue.push(redis, "foo"); - queue.push(redis, "foo"); - queue.push(redis, "foo"); - 
queue.push(redis, "foo"); - queue.push(redis, "foo"); - queue.push(redis, "foo"); - queue.push(redis, "foo"); - - // ASSERT - verify(redis, times(10)).lpush("test", "foo"); + verify(redis, times(1)).blmove("test", queue.getDequeueName(), RIGHT, LEFT, 1.47); } - // Function under test: removeFromDequeue - // Reason for testing: we can remove an element from the dequeue - // Failure explanation: we are either unable to get an element into the dequeue or unable to - // remove it @Test - public void removeFromDequeueRemoveADequeueValue() throws Exception { - // ARRANGE - when(redis.lrem("test_dequeue", -1, "foo")).thenReturn(1L); - RedisQueue queue = new RedisQueue("test"); + public void pollShouldLMove() { + RedisQueue queue = new RedisQueue(redis, "test"); - // ACT - boolean wasRemoved = queue.removeFromDequeue(redis, "foo"); + queue.poll(); - // ASSERT - assertThat(wasRemoved).isTrue(); - verify(redis, times(1)).lrem("test_dequeue", -1, "foo"); + verify(redis, times(1)).lmove("test", queue.getDequeueName(), RIGHT, LEFT); } - // Function under test: dequeue - // Reason for testing: the element is able to be dequeued - // Failure explanation: something prevented the element from being dequeued @Test - public void dequeueElementCanBeDequeuedWithTimeout() throws Exception { - // ARRANGE - when(redis.brpoplpush("test", "test_dequeue", 1)).thenReturn("foo"); - RedisQueue queue = new RedisQueue("test"); + public void sizeShouldLLen() { + RedisQueue queue = new RedisQueue(redis, "test"); - // ACT - queue.push(redis, "foo"); - String val = queue.dequeue(redis, 1); + queue.size(); - // ASSERT - assertThat(val).isEqualTo("foo"); + verify(redis, times(1)).llen("test"); } - // Function under test: dequeue - // Reason for testing: element is not dequeued - // Failure explanation: element was dequeued - @Test - public void dequeueElementIsNotDequeuedIfTimeRunsOut() throws Exception { - // ARRANGE - when(redis.brpoplpush("test", "test_dequeue", 1)).thenReturn(null); - RedisQueue queue = new RedisQueue("test"); - - // ACT - queue.push(redis, "foo"); - String val = queue.dequeue(redis, 5); - - // ASSERT - assertThat(val).isEqualTo(null); + private void arrangeVisitLRange(String name, int listPageSize, Iterable entries) { + int index = 0; + int nextIndex = listPageSize; + for (Iterable page : partition(entries, listPageSize)) { + when(redis.lrange(name, index, nextIndex - 1)).thenReturn(newArrayList(page)); + index = nextIndex; + nextIndex += listPageSize; + } } - // Function under test: dequeue - // Reason for testing: the dequeue is interrupted - // Failure explanation: the dequeue was not interrupted as expected - @Test - public void dequeueInterrupt() throws Exception { - // ARRANGE - when(redis.brpoplpush("test", "test_dequeue", 1)).thenReturn(null); - RedisQueue queue = new RedisQueue("test"); - - // ACT - queue.push(redis, "foo"); - Thread call = - new Thread( - () -> { - try { - queue.dequeue(redis, 100000); - } catch (Exception e) { - } - }); - call.start(); - call.interrupt(); + private void verifyVisitLRange( + String name, StringVisitor visitor, int listPageSize, Iterable entries) { + int pageCount = listPageSize; + int index = 0; + int nextIndex = listPageSize; + for (String entry : entries) { + verify(visitor, times(1)).visit(entry); + if (--pageCount == 0) { + verify(redis, times(1)).lrange(name, index, nextIndex - 1); + index = nextIndex; + nextIndex += listPageSize; + pageCount = listPageSize; + } + } + if (pageCount != 0) { + verify(redis, times(1)).lrange(name, index, nextIndex - 1); + } } 
- // Function under test: nonBlockingDequeue - // Reason for testing: the element is able to be dequeued - // Failure explanation: something prevented the element from being dequeued - @Test - public void nonBlockingDequeueElementCanBeDequeued() throws Exception { - // ARRANGE - when(redis.rpoplpush("test", "test_dequeue")).thenReturn("foo"); - RedisQueue queue = new RedisQueue("test"); - - // ACT - queue.push(redis, "foo"); - String val = queue.nonBlockingDequeue(redis); - - // ASSERT - assertThat(val).isEqualTo("foo"); - } + private final Iterable<String> VISIT_ENTRIES = ImmutableList.of("one", "two", "three", "four"); - // Function under test: getName - // Reason for testing: the name can be received - // Failure explanation: name does not match what it should @Test - public void getNameNameIsStored() throws Exception { - // ARRANGE - RedisQueue queue = new RedisQueue("queue_name"); + public void visitShouldLRange() { + int listPageSize = 3; + RedisQueue queue = new RedisQueue(redis, "test", listPageSize); + arrangeVisitLRange("test", listPageSize, VISIT_ENTRIES); + StringVisitor visitor = mock(StringVisitor.class); - // ACT - String name = queue.getName(); + queue.visit(visitor); - // ASSERT - assertThat(name).isEqualTo("queue_name"); + verifyVisitLRange("test", visitor, listPageSize, VISIT_ENTRIES); } - // Function under test: getDequeueName - // Reason for testing: the name can be received - // Failure explanation: name does not match what it should @Test - public void getDequeueNameNameIsStored() throws Exception { - // ARRANGE - RedisQueue queue = new RedisQueue("queue_name"); + public void visitDequeueShouldLRange() { + int listPageSize = 3; + RedisQueue queue = new RedisQueue(redis, "test", listPageSize); + arrangeVisitLRange(queue.getDequeueName(), listPageSize, VISIT_ENTRIES); + StringVisitor visitor = mock(StringVisitor.class); - // ACT - String name = queue.getDequeueName(); + queue.visitDequeue(visitor); - // ASSERT - assertThat(name).isEqualTo("queue_name_dequeue"); - } - - // Function under test: visit - // Reason for testing: each element in the queue can be visited - // Failure explanation: we are unable to visit each element in the queue - @Test - public void visitCheckVisitOfEachElement() throws Exception { - // MOCK - when(redis.lrange(any(String.class), any(Long.class), any(Long.class))) - .thenReturn( - Arrays.asList( - "element 1", - "element 2", - "element 3", - "element 4", - "element 5", - "element 6", - "element 7", - "element 8")); - - // ARRANGE - RedisQueue queue = new RedisQueue("test"); - queue.push(redis, "element 1"); - queue.push(redis, "element 2"); - queue.push(redis, "element 3"); - queue.push(redis, "element 4"); - queue.push(redis, "element 5"); - queue.push(redis, "element 6"); - queue.push(redis, "element 7"); - queue.push(redis, "element 8"); - - // ACT - List<String> visited = new ArrayList<>(); - StringVisitor visitor = - new StringVisitor() { - public void visit(String entry) { - visited.add(entry); - } - }; - queue.visit(redis, visitor); - - // ASSERT - assertThat(visited.size()).isEqualTo(8); - assertThat(visited.contains("element 1")).isTrue(); - assertThat(visited.contains("element 2")).isTrue(); - assertThat(visited.contains("element 3")).isTrue(); - assertThat(visited.contains("element 4")).isTrue(); - assertThat(visited.contains("element 5")).isTrue(); - assertThat(visited.contains("element 6")).isTrue(); - assertThat(visited.contains("element 7")).isTrue(); - assertThat(visited.contains("element 8")).isTrue(); - } - - // Function under test:
visitDequeue - // Reason for testing: each element in the queue can be visited - // Failure explanation: we are unable to visit each element in the queue - @Test - public void visitDequeueCheckVisitOfEachElement() throws Exception { - // MOCK - when(redis.lrange(any(String.class), any(Long.class), any(Long.class))) - .thenReturn( - Arrays.asList( - "element 1", - "element 2", - "element 3", - "element 4", - "element 5", - "element 6", - "element 7", - "element 8")); - - // ARRANGE - RedisQueue queue = new RedisQueue("test"); - - // ACT - List<String> visited = new ArrayList<>(); - StringVisitor visitor = - new StringVisitor() { - public void visit(String entry) { - visited.add(entry); - } - }; - queue.visitDequeue(redis, visitor); - - // ASSERT - assertThat(visited.size()).isEqualTo(8); - assertThat(visited.contains("element 1")).isTrue(); - assertThat(visited.contains("element 2")).isTrue(); - assertThat(visited.contains("element 3")).isTrue(); - assertThat(visited.contains("element 4")).isTrue(); - assertThat(visited.contains("element 5")).isTrue(); - assertThat(visited.contains("element 6")).isTrue(); - assertThat(visited.contains("element 7")).isTrue(); - assertThat(visited.contains("element 8")).isTrue(); + verifyVisitLRange(queue.getDequeueName(), visitor, listPageSize, VISIT_ENTRIES); } } diff --git a/src/test/java/build/buildfarm/common/redis/RedisQueueTest.java b/src/test/java/build/buildfarm/common/redis/RedisQueueTest.java index 23e46ca285..c220684b9f 100644 --- a/src/test/java/build/buildfarm/common/redis/RedisQueueTest.java +++ b/src/test/java/build/buildfarm/common/redis/RedisQueueTest.java @@ -15,18 +15,23 @@ package build.buildfarm.common.redis; import static com.google.common.truth.Truth.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import build.buildfarm.common.StringVisitor; import build.buildfarm.common.config.BuildfarmConfigs; import build.buildfarm.instance.shard.JedisClusterFactory; -import java.util.ArrayList; -import java.util.List; +import com.google.common.collect.ImmutableList; +import java.time.Duration; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; -import redis.clients.jedis.JedisCluster; +import redis.clients.jedis.Jedis; +import redis.clients.jedis.JedisPooled; +import redis.clients.jedis.UnifiedJedis; /** * @class RedisQueueTest @@ -40,240 +45,155 @@ @RunWith(JUnit4.class) public class RedisQueueTest { private BuildfarmConfigs configs = BuildfarmConfigs.getInstance(); - private JedisCluster redis; + private JedisPooled pooled; + private Jedis redis; @Before public void setUp() throws Exception { configs.getBackplane().setRedisUri("redis://localhost:6379"); - redis = JedisClusterFactory.createTest(); + UnifiedJedis unified = JedisClusterFactory.createTest(); + assertThat(unified).isInstanceOf(JedisPooled.class); + pooled = (JedisPooled) unified; + redis = new Jedis(pooled.getPool().getResource()); + redis.flushDB(); } @After public void tearDown() { + redis.flushDB(); redis.close(); + pooled.close(); } - // Function under test: RedisQueue - // Reason for testing: the queue can be constructed with a valid cluster instance and name - // Failure explanation: the queue is throwing an exception upon construction @Test - public void redisQueueConstructsWithoutError() throws Exception { - // ACT - new RedisQueue("test"); + public void decorateSucceeds() { + RedisQueue.decorate(redis,
"test"); } - // Function under test: push - // Reason for testing: the queue can have a value pushed onto it - // Failure explanation: the queue is throwing an exception upon push @Test - public void pushPushWithoutError() throws Exception { - // ARRANGE - RedisQueue queue = new RedisQueue("test"); + public void offerShouldContain() { + RedisQueue queue = new RedisQueue(redis, "test"); - // ACT - queue.push(redis, "foo"); + queue.offer("foo"); + + assertThat(redis.lrange("test", 0, -1)).containsExactly("foo"); } - // Function under test: push - // Reason for testing: the queue can have the different values pushed onto it - // Failure explanation: the queue is throwing an exception upon pushing different values @Test - public void pushPushDifferentWithoutError() throws Exception { - // ARRANGE - RedisQueue queue = new RedisQueue("test"); + public void removeFromDequeueShouldExclude() { + RedisQueue queue = new RedisQueue(redis, "test"); + redis.lpush(queue.getDequeueName(), "foo", "bar", "foo"); + + queue.removeFromDequeue("foo"); - // ACT - queue.push(redis, "foo"); - queue.push(redis, "bar"); + assertThat(redis.lrange(queue.getDequeueName(), 0, -1)).containsExactly("foo", "bar").inOrder(); } - // Function under test: push - // Reason for testing: the queue can have the same values pushed onto it - // Failure explanation: the queue is throwing an exception upon pushing the same values @Test - public void pushPushSameWithoutError() throws Exception { - // ARRANGE - RedisQueue queue = new RedisQueue("test"); + public void removeAllShouldExclude() { + RedisQueue queue = new RedisQueue(redis, "test"); + redis.lpush("test", "foo", "bar", "foo"); - // ACT - queue.push(redis, "foo"); - queue.push(redis, "foo"); + queue.removeAll("foo"); + + assertThat(redis.lrange("test", 0, -1)).containsExactly("bar"); } - // Function under test: push - // Reason for testing: the queue can have many values pushed into it - // Failure explanation: the queue is throwing an exception upon pushing many values @Test - public void pushPushMany() throws Exception { - // ARRANGE - RedisQueue queue = new RedisQueue("test"); + public void takeShouldPrependToDequeue() { + RedisQueue queue = new RedisQueue(redis, "test"); + redis.lpush("test", "foo", "bar", "foo"); + redis.lpush(queue.getDequeueName(), "baz"); - // ACT - for (int i = 0; i < 1000; ++i) { - queue.push(redis, "foo" + i); - } + String value = queue.take(Duration.ofMillis(1)); + + assertThat(value).isEqualTo("foo"); + assertThat(redis.lrange("test", 0, -1)).containsExactly("foo", "bar").inOrder(); + assertThat(redis.lrange(queue.getDequeueName(), 0, -1)).containsExactly("foo", "baz").inOrder(); } - // Function under test: push - // Reason for testing: the queue size increases as elements are pushed - // Failure explanation: the queue size is not accurately reflecting the pushes @Test - public void pushPushIncreasesSize() throws Exception { - // ARRANGE - RedisQueue queue = new RedisQueue("test"); - - // ACT / ASSERT - assertThat(queue.size(redis)).isEqualTo(0); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(1); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(2); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(3); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(4); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(5); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(6); - queue.push(redis, "foo"); - 
assertThat(queue.size(redis)).isEqualTo(7); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(8); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(9); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(10); + public void takeEmptyShouldReturnNullAfterTimeoutAndIgnoreDequeue() { + RedisQueue queue = new RedisQueue(redis, "test"); + redis.lpush(queue.getDequeueName(), "foo"); + + String value = queue.take(Duration.ofMillis(1)); + + assertThat(value).isNull(); + assertThat(redis.lrange("test", 0, -1)).isEmpty(); + assertThat(redis.lrange(queue.getDequeueName(), 0, -1)).containsExactly("foo"); } - // Function under test: getName - // Reason for testing: the name can be received - // Failure explanation: name does not match what it should @Test - public void getNameNameIsStored() throws Exception { - // ARRANGE - RedisQueue queue = new RedisQueue("queue_name"); + public void pollShouldPrependToDequeue() { + RedisQueue queue = new RedisQueue(redis, "test"); + redis.lpush("test", "foo", "bar", "foo"); + redis.lpush(queue.getDequeueName(), "baz"); - // ACT - String name = queue.getName(); + String value = queue.poll(); - // ASSERT - assertThat(name).isEqualTo("queue_name"); + assertThat(value).isEqualTo("foo"); + assertThat(redis.lrange("test", 0, -1)).containsExactly("foo", "bar").inOrder(); + assertThat(redis.lrange(queue.getDequeueName(), 0, -1)).containsExactly("foo", "baz").inOrder(); } - // Function under test: getDequeueName - // Reason for testing: the name can be received - // Failure explanation: name does not match what it should @Test - public void getDequeueNameNameIsStored() throws Exception { - // ARRANGE - RedisQueue queue = new RedisQueue("queue_name"); + public void pollEmptyShouldReturnNullAndIgnoreDequeue() { + RedisQueue queue = new RedisQueue(redis, "test"); + redis.lpush(queue.getDequeueName(), "foo"); - // ACT - String name = queue.getDequeueName(); + String value = queue.poll(); - // ASSERT - assertThat(name).isEqualTo("queue_name_dequeue"); + assertThat(value).isNull(); + assertThat(redis.lrange("test", 0, -1)).isEmpty(); + assertThat(redis.lrange(queue.getDequeueName(), 0, -1)).containsExactly("foo"); } - // Function under test: size - // Reason for testing: size adjusts with push and dequeue - // Failure explanation: size is incorrectly reporting the expected queue size @Test - public void sizeAdjustPushDequeue() throws Exception { - // ARRANGE - RedisQueue queue = new RedisQueue("{hash}test"); - // ACT / ASSERT - assertThat(queue.size(redis)).isEqualTo(0); - queue.push(redis, "foo"); - assertThat(queue.size(redis)).isEqualTo(1); - queue.push(redis, "bar"); - assertThat(queue.size(redis)).isEqualTo(2); - queue.push(redis, "baz"); - assertThat(queue.size(redis)).isEqualTo(3); - queue.push(redis, "baz"); - assertThat(queue.size(redis)).isEqualTo(4); - queue.push(redis, "baz"); - assertThat(queue.size(redis)).isEqualTo(5); - queue.push(redis, "baz"); - assertThat(queue.size(redis)).isEqualTo(6); - queue.dequeue(redis, 1); - assertThat(queue.size(redis)).isEqualTo(5); - queue.dequeue(redis, 1); - assertThat(queue.size(redis)).isEqualTo(4); - queue.dequeue(redis, 1); - assertThat(queue.size(redis)).isEqualTo(3); - queue.dequeue(redis, 1); - assertThat(queue.size(redis)).isEqualTo(2); - queue.dequeue(redis, 1); - assertThat(queue.size(redis)).isEqualTo(1); - queue.dequeue(redis, 1); - assertThat(queue.size(redis)).isEqualTo(0); + public void sizeYieldsLengthAndIgnoresDequeue() { + RedisQueue queue = new 
RedisQueue(redis, "test"); + + redis.lpush(queue.getDequeueName(), "foo"); + assertThat(queue.size()).isEqualTo(0); + redis.lpush("test", "bar"); + assertThat(queue.size()).isEqualTo(1); + redis.lpush("test", "baz"); + assertThat(queue.size()).isEqualTo(2); } - // Function under test: visit - // Reason for testing: each element in the queue can be visited - // Failure explanation: we are unable to visit each element in the queue + private final Iterable VISIT_ENTRIES = ImmutableList.of("one", "two", "three", "four"); + @Test - public void visitCheckVisitOfEachElement() throws Exception { - // ARRANGE - RedisQueue queue = new RedisQueue("test"); - queue.push(redis, "element 1"); - queue.push(redis, "element 2"); - queue.push(redis, "element 3"); - queue.push(redis, "element 4"); - queue.push(redis, "element 5"); - queue.push(redis, "element 6"); - queue.push(redis, "element 7"); - queue.push(redis, "element 8"); - - // ACT - List visited = new ArrayList<>(); - StringVisitor visitor = - new StringVisitor() { - public void visit(String entry) { - visited.add(entry); - } - }; - queue.visit(redis, visitor); - - // ASSERT - assertThat(visited.size()).isEqualTo(8); - assertThat(visited.contains("element 1")).isTrue(); - assertThat(visited.contains("element 2")).isTrue(); - assertThat(visited.contains("element 3")).isTrue(); - assertThat(visited.contains("element 4")).isTrue(); - assertThat(visited.contains("element 5")).isTrue(); - assertThat(visited.contains("element 6")).isTrue(); - assertThat(visited.contains("element 7")).isTrue(); - assertThat(visited.contains("element 8")).isTrue(); + public void visitShouldEnumerateAndIgnoreDequeue() { + int listPageSize = 3; + RedisQueue queue = new RedisQueue(redis, "test", listPageSize); + redis.lpush(queue.getDequeueName(), "processing"); + for (String entry : VISIT_ENTRIES) { + redis.lpush("test", entry); + } + StringVisitor visitor = mock(StringVisitor.class); + + queue.visit(visitor); + + for (String entry : VISIT_ENTRIES) { + verify(visitor, times(1)).visit(entry); + } } - // Function under test: visit - // Reason for testing: add and visit many elements - // Failure explanation: we are unable to visit all the elements when there are many of them @Test - public void visitVisitManyOverPageSize() throws Exception { - // ARRANGE - RedisQueue queue = new RedisQueue("test"); - for (int i = 0; i < 2500; ++i) { - queue.push(redis, "foo" + i); + public void visitDequeueShouldEnumerateAndIgnoreQueue() { + int listPageSize = 3; + RedisQueue queue = new RedisQueue(redis, "test", listPageSize); + redis.lpush("test", "processing"); + for (String entry : VISIT_ENTRIES) { + redis.lpush(queue.getDequeueName(), entry); } + StringVisitor visitor = mock(StringVisitor.class); + + queue.visitDequeue(visitor); - // ACT - List visited = new ArrayList<>(); - StringVisitor visitor = - new StringVisitor() { - public void visit(String entry) { - visited.add(entry); - } - }; - queue.visit(redis, visitor); - - // ASSERT - assertThat(visited.size()).isEqualTo(2500); - for (int i = 0; i < 2500; ++i) { - assertThat(visited.contains("foo" + i)).isTrue(); + for (String entry : VISIT_ENTRIES) { + verify(visitor, times(1)).visit(entry); } } } diff --git a/src/test/java/build/buildfarm/common/redis/RedisSlotToHashTest.java b/src/test/java/build/buildfarm/common/redis/RedisSlotToHashTest.java index 4fd08de251..7cb7e59f43 100644 --- a/src/test/java/build/buildfarm/common/redis/RedisSlotToHashTest.java +++ b/src/test/java/build/buildfarm/common/redis/RedisSlotToHashTest.java @@ -15,8 
+15,9 @@ package build.buildfarm.common.redis; import static com.google.common.truth.Truth.assertThat; -import static redis.clients.jedis.JedisCluster.HASHSLOTS; +import static redis.clients.jedis.Protocol.CLUSTER_HASHSLOTS; +import com.google.common.collect.ImmutableList; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; @@ -41,7 +42,7 @@ public class RedisSlotToHashTest { // slot numbers to strings is incorrect @Test public void correlateCorrectForEverySlot() throws Exception { - for (int i = 0; i < HASHSLOTS; ++i) { + for (int i = 0; i < CLUSTER_HASHSLOTS; ++i) { // convert to hashtag String hashtag = RedisSlotToHash.correlate(i); @@ -82,7 +83,9 @@ public void correlateRangeCorrectHashtagFoundForSlotRange() throws Exception { @Test public void correlateRangeWithPrefixCorrectHashtagFoundForSlotRange() throws Exception { // convert to hashtag - String hashtag = RedisSlotToHash.correlateRangeWithPrefix(100, 200, "Execution"); + String hashtag = + RedisSlotToHash.correlateRangesWithPrefix( + ImmutableList.of(ImmutableList.of(100l, 200l)), "Execution"); // convert hashtag back to slot int slotNumber = JedisClusterCRC16.getSlot(hashtag); diff --git a/src/test/java/build/buildfarm/common/resources/BUILD b/src/test/java/build/buildfarm/common/resources/BUILD index 3457160a4f..824240fa73 100644 --- a/src/test/java/build/buildfarm/common/resources/BUILD +++ b/src/test/java/build/buildfarm/common/resources/BUILD @@ -9,9 +9,8 @@ java_test( "//src/main/java/build/buildfarm/common/resources:resource_java_proto", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", "//src/test/java/build/buildfarm:test_runner", - "//third_party/jedis", - "@googleapis//:google_bytestream_bytestream_java_proto", - "@googleapis//:google_rpc_error_details_java_proto", + "@com_google_googleapis//google/bytestream:bytestream_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_google_guava_guava", "@maven//:com_google_jimfs_jimfs", "@maven//:com_google_protobuf_protobuf_java", @@ -22,6 +21,7 @@ java_test( "@maven//:io_grpc_grpc_stub", "@maven//:io_grpc_grpc_testing", "@maven//:org_mockito_mockito_core", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@maven//:redis_clients_jedis", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) diff --git a/src/test/java/build/buildfarm/common/services/BUILD b/src/test/java/build/buildfarm/common/services/BUILD index 2ba54ab58a..87f9f943b1 100644 --- a/src/test/java/build/buildfarm/common/services/BUILD +++ b/src/test/java/build/buildfarm/common/services/BUILD @@ -14,15 +14,17 @@ java_test( "//src/main/java/build/buildfarm/instance", "//src/main/java/build/buildfarm/instance/stub", "//src/test/java/build/buildfarm:test_runner", - "@googleapis//:google_bytestream_bytestream_java_grpc", - "@googleapis//:google_bytestream_bytestream_java_proto", + "@com_google_googleapis//google/bytestream:bytestream_java_grpc", + "@com_google_googleapis//google/bytestream:bytestream_java_proto", + "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", "@maven//:com_google_truth_truth", "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_context", "@maven//:io_grpc_grpc_core", + "@maven//:io_grpc_grpc_inprocess", "@maven//:io_grpc_grpc_stub", "@maven//:org_mockito_mockito_core", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", 
], ) diff --git a/src/test/java/build/buildfarm/common/services/ByteStreamServiceTest.java b/src/test/java/build/buildfarm/common/services/ByteStreamServiceTest.java index 701f6ae294..72f3c46bbe 100644 --- a/src/test/java/build/buildfarm/common/services/ByteStreamServiceTest.java +++ b/src/test/java/build/buildfarm/common/services/ByteStreamServiceTest.java @@ -164,7 +164,7 @@ public boolean isReady() { HashCode hash = HashCode.fromString(digest.getHash()); String resourceName = ByteStreamUploader.uploadResourceName( - /* instanceName=*/ null, uuid, hash, digest.getSizeBytes()); + /* instanceName= */ null, uuid, hash, digest.getSizeBytes()); Channel channel = InProcessChannelBuilder.forName(fakeServerName).directExecutor().build(); ByteStreamStub service = ByteStreamGrpc.newStub(channel); @@ -178,6 +178,7 @@ public boolean isReady() { .setResourceName(resourceName) .setData(shortContent) .build()); + verify(write, times(1)).reset(); requestObserver.onNext( WriteRequest.newBuilder().setWriteOffset(0).setData(content).setFinishWrite(true).build()); assertThat(futureResponder.get()) @@ -186,7 +187,7 @@ public boolean isReady() { verify(write, atLeastOnce()).getCommittedSize(); verify(write, atLeastOnce()) .getOutput(any(Long.class), any(TimeUnit.class), any(Runnable.class)); - verify(write, times(1)).reset(); + verify(write, times(2)).reset(); verify(write, times(1)).getFuture(); } @@ -240,7 +241,7 @@ public boolean isReady() { HashCode hash = HashCode.fromString(digest.getHash()); String resourceName = ByteStreamUploader.uploadResourceName( - /* instanceName=*/ null, uuid, hash, digest.getSizeBytes()); + /* instanceName= */ null, uuid, hash, digest.getSizeBytes()); Channel channel = InProcessChannelBuilder.forName(fakeServerName).directExecutor().build(); ByteStreamStub service = ByteStreamGrpc.newStub(channel); diff --git a/src/test/java/build/buildfarm/common/services/WriteStreamObserverTest.java b/src/test/java/build/buildfarm/common/services/WriteStreamObserverTest.java index 13f83c8423..4a3c864b24 100644 --- a/src/test/java/build/buildfarm/common/services/WriteStreamObserverTest.java +++ b/src/test/java/build/buildfarm/common/services/WriteStreamObserverTest.java @@ -8,7 +8,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.verifyNoInteractions; import static org.mockito.Mockito.when; import build.bazel.remote.execution.v2.Compressor; @@ -29,6 +29,7 @@ import io.grpc.Context; import io.grpc.Context.CancellableContext; import io.grpc.stub.StreamObserver; +import java.io.IOException; import java.util.UUID; import java.util.concurrent.TimeUnit; import org.junit.Test; @@ -91,6 +92,55 @@ public void cancelledBeforeGetOutputIsSilent() throws Exception { any(RequestMetadata.class)); verify(write, times(1)).getOutput(any(Long.class), any(TimeUnit.class), any(Runnable.class)); verify(out, times(1)).close(); - verifyZeroInteractions(responseObserver); + verifyNoInteractions(responseObserver); + } + + @Test + public void noErrorWhenContextCancelled() throws Exception { + CancellableContext context = Context.current().withCancellation(); + Instance instance = mock(Instance.class); + StreamObserver<WriteResponse> responseObserver = mock(StreamObserver.class); + ByteString cancelled = ByteString.copyFromUtf8("cancelled data"); + Digest cancelledDigest = DIGEST_UTIL.compute(cancelled); + UUID uuid = UUID.randomUUID(); + UploadBlobRequest
uploadBlobRequest = + UploadBlobRequest.newBuilder() + .setBlob(BlobInformation.newBuilder().setDigest(cancelledDigest)) + .setUuid(uuid.toString()) + .build(); + SettableFuture<Long> future = SettableFuture.create(); + Write write = mock(Write.class); + when(write.getFuture()).thenReturn(future); + when(write.isComplete()).thenReturn(Boolean.TRUE); + when(instance.getBlobWrite( + eq(Compressor.Value.IDENTITY), + eq(cancelledDigest), + eq(DigestFunction.Value.UNKNOWN), + eq(uuid), + any(RequestMetadata.class))) + .thenReturn(write); + + WriteStreamObserver observer = + context.call( + () -> new WriteStreamObserver(instance, 1, SECONDS, () -> {}, responseObserver)); + context.run( + () -> + observer.onNext( + WriteRequest.newBuilder() + .setResourceName(uploadResourceName(uploadBlobRequest)) + .setData(cancelled) + .build())); + context.cancel(new RuntimeException("Cancelled by test")); + future.setException(new IOException("test cancel")); + + verify(write, times(1)).isComplete(); + verify(instance, times(1)) + .getBlobWrite( + eq(Compressor.Value.IDENTITY), + eq(cancelledDigest), + eq(DigestFunction.Value.UNKNOWN), + eq(uuid), + any(RequestMetadata.class)); + verifyNoInteractions(responseObserver); } } diff --git a/src/test/java/build/buildfarm/examples/BUILD b/src/test/java/build/buildfarm/examples/BUILD index a0204865e5..25014e5b53 100644 --- a/src/test/java/build/buildfarm/examples/BUILD +++ b/src/test/java/build/buildfarm/examples/BUILD @@ -8,9 +8,8 @@ java_test( "//src/main/java/build/buildfarm/common/config", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", "//src/test/java/build/buildfarm:test_runner", - "//third_party/jedis", - "@googleapis//:google_bytestream_bytestream_java_proto", - "@googleapis//:google_rpc_error_details_java_proto", + "@com_google_googleapis//google/bytestream:bytestream_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_google_guava_guava", "@maven//:com_google_jimfs_jimfs", "@maven//:com_google_protobuf_protobuf_java", @@ -21,5 +20,6 @@ java_test( "@maven//:io_grpc_grpc_stub", "@maven//:io_grpc_grpc_testing", "@maven//:org_mockito_mockito_core", + "@maven//:redis_clients_jedis", ], ) diff --git a/src/test/java/build/buildfarm/examples/ExampleConfigsTest.java b/src/test/java/build/buildfarm/examples/ExampleConfigsTest.java index 6033429f0e..ac1c605e80 100644 --- a/src/test/java/build/buildfarm/examples/ExampleConfigsTest.java +++ b/src/test/java/build/buildfarm/examples/ExampleConfigsTest.java @@ -35,16 +35,14 @@ public void skipWindows() { @Test public void shardWorkerConfig() throws IOException { Path configPath = - Paths.get( - System.getenv("TEST_SRCDIR"), "build_buildfarm", "examples", "config.minimal.yml"); + Paths.get(System.getenv("TEST_SRCDIR"), "_main", "examples", "config.minimal.yml"); BuildfarmConfigs configs = BuildfarmConfigs.getInstance(); configs.loadConfigs(configPath); } @Test public void fullConfig() throws IOException { - Path configPath = - Paths.get(System.getenv("TEST_SRCDIR"), "build_buildfarm", "examples", "config.yml"); + Path configPath = Paths.get(System.getenv("TEST_SRCDIR"), "_main", "examples", "config.yml"); BuildfarmConfigs configs = BuildfarmConfigs.getInstance(); configs.loadConfigs(configPath); } diff --git a/src/test/java/build/buildfarm/instance/server/BUILD b/src/test/java/build/buildfarm/instance/server/BUILD index 39e545d042..4becb0230c 100644 --- a/src/test/java/build/buildfarm/instance/server/BUILD +++ b/src/test/java/build/buildfarm/instance/server/BUILD @@ -13,8 +13,8 @@
java_test( "//src/main/java/build/buildfarm/operations", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", "//src/test/java/build/buildfarm:test_runner", - "@googleapis//:google_longrunning_operations_java_proto", - "@googleapis//:google_rpc_error_details_java_proto", + "@com_google_googleapis//google/longrunning:longrunning_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", "@maven//:com_google_truth_truth", @@ -22,6 +22,6 @@ java_test( "@maven//:io_grpc_grpc_stub", "@maven//:org_mockito_mockito_core", "@maven//:org_projectlombok_lombok", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) diff --git a/src/test/java/build/buildfarm/instance/server/AbstractServerInstanceTest.java b/src/test/java/build/buildfarm/instance/server/NodeInstanceTest.java similarity index 69% rename from src/test/java/build/buildfarm/instance/server/AbstractServerInstanceTest.java rename to src/test/java/build/buildfarm/instance/server/NodeInstanceTest.java index a8f2f298a2..87ae91c4c9 100644 --- a/src/test/java/build/buildfarm/instance/server/AbstractServerInstanceTest.java +++ b/src/test/java/build/buildfarm/instance/server/NodeInstanceTest.java @@ -16,12 +16,14 @@ import static build.buildfarm.common.Actions.checkPreconditionFailure; import static build.buildfarm.common.Errors.VIOLATION_TYPE_INVALID; -import static build.buildfarm.instance.server.AbstractServerInstance.ACTION_INPUT_ROOT_DIRECTORY_PATH; -import static build.buildfarm.instance.server.AbstractServerInstance.DIRECTORY_NOT_SORTED; -import static build.buildfarm.instance.server.AbstractServerInstance.DUPLICATE_DIRENT; -import static build.buildfarm.instance.server.AbstractServerInstance.INVALID_COMMAND; -import static build.buildfarm.instance.server.AbstractServerInstance.OUTPUT_DIRECTORY_IS_OUTPUT_ANCESTOR; -import static build.buildfarm.instance.server.AbstractServerInstance.OUTPUT_FILE_IS_OUTPUT_ANCESTOR; +import static build.buildfarm.common.Errors.VIOLATION_TYPE_MISSING; +import static build.buildfarm.instance.server.NodeInstance.ACTION_INPUT_ROOT_DIRECTORY_PATH; +import static build.buildfarm.instance.server.NodeInstance.DIRECTORY_NOT_SORTED; +import static build.buildfarm.instance.server.NodeInstance.DUPLICATE_DIRENT; +import static build.buildfarm.instance.server.NodeInstance.INVALID_COMMAND; +import static build.buildfarm.instance.server.NodeInstance.OUTPUT_DIRECTORY_IS_OUTPUT_ANCESTOR; +import static build.buildfarm.instance.server.NodeInstance.OUTPUT_FILE_IS_OUTPUT_ANCESTOR; +import static build.buildfarm.instance.server.NodeInstance.SYMLINK_TARGET_ABSOLUTE; import static com.google.common.truth.Truth.assertThat; import static com.google.common.util.concurrent.Futures.immediateFuture; import static org.mockito.Mockito.any; @@ -42,6 +44,7 @@ import build.bazel.remote.execution.v2.OutputDirectory; import build.bazel.remote.execution.v2.Platform; import build.bazel.remote.execution.v2.RequestMetadata; +import build.bazel.remote.execution.v2.SymlinkNode; import build.bazel.remote.execution.v2.Tree; import build.buildfarm.actioncache.ActionCache; import build.buildfarm.cas.ContentAddressableStorage; @@ -96,10 +99,10 @@ @RunWith(JUnit4.class) @Log -public class AbstractServerInstanceTest { +public class NodeInstanceTest { private static final DigestUtil DIGEST_UTIL = new DigestUtil(HashFunction.SHA256); - static class 
DummyServerInstance extends AbstractServerInstance { + static class DummyServerInstance extends NodeInstance { DummyServerInstance( ContentAddressableStorage contentAddressableStorage, ActionCache actionCache) { super( @@ -107,14 +110,14 @@ static class DummyServerInstance extends AbstractServerInstance { /* digestUtil= */ DIGEST_UTIL, contentAddressableStorage, actionCache, - /* outstandingOperations=*/ null, - /* completedOperations=*/ null, - /* activeBlobWrites=*/ null, + /* outstandingOperations= */ null, + /* completedOperations= */ null, + /* activeBlobWrites= */ null, false); } DummyServerInstance() { - this(/* contentAddressableStorage=*/ null, /* actionCache=*/ null); + this(/* contentAddressableStorage= */ null, /* actionCache= */ null); } @Override @@ -258,7 +261,7 @@ public PrepareWorkerForGracefulShutDownRequestResults shutDownWorkerGracefully() @Test public void duplicateFileInputIsInvalid() { PreconditionFailure.Builder preconditionFailure = PreconditionFailure.newBuilder(); - AbstractServerInstance.validateActionInputDirectory( + NodeInstance.validateActionInputDirectory( ACTION_INPUT_ROOT_DIRECTORY_PATH, Directory.newBuilder() .addAllFiles( @@ -266,12 +269,13 @@ public void duplicateFileInputIsInvalid() { FileNode.newBuilder().setName("foo").build(), FileNode.newBuilder().setName("foo").build())) .build(), - /* pathDigests=*/ new Stack<>(), - /* visited=*/ Sets.newHashSet(), - /* directoriesIndex=*/ Maps.newHashMap(), - /* onInputFile=*/ file -> {}, - /* onInputDirectorie=*/ directory -> {}, - /* onInputDigest=*/ digest -> {}, + /* pathDigests= */ new Stack<>(), + /* visited= */ Sets.newHashSet(), + /* directoriesIndex= */ Maps.newHashMap(), + /* allowSymlinkTargetAbsolute= */ false, + /* onInputFile= */ file -> {}, + /* onInputDirectorie= */ directory -> {}, + /* onInputDigest= */ digest -> {}, preconditionFailure); assertThat(preconditionFailure.getViolationsCount()).isEqualTo(1); @@ -286,7 +290,7 @@ public void duplicateEmptyDirectoryCheckPasses() throws StatusException { Directory emptyDirectory = Directory.getDefaultInstance(); Digest emptyDirectoryDigest = DIGEST_UTIL.compute(emptyDirectory); PreconditionFailure.Builder preconditionFailure = PreconditionFailure.newBuilder(); - AbstractServerInstance.validateActionInputDirectory( + NodeInstance.validateActionInputDirectory( ACTION_INPUT_ROOT_DIRECTORY_PATH, Directory.newBuilder() .addAllDirectories( @@ -300,12 +304,13 @@ public void duplicateEmptyDirectoryCheckPasses() throws StatusException { .setDigest(emptyDirectoryDigest) .build())) .build(), - /* pathDigests=*/ new Stack<>(), - /* visited=*/ Sets.newHashSet(), - /* directoriesIndex=*/ ImmutableMap.of(Digest.getDefaultInstance(), emptyDirectory), - /* onInputFiles=*/ file -> {}, - /* onInputDirectories=*/ directory -> {}, - /* onInputDigests=*/ digest -> {}, + /* pathDigests= */ new Stack<>(), + /* visited= */ Sets.newHashSet(), + /* directoriesIndex= */ ImmutableMap.of(Digest.getDefaultInstance(), emptyDirectory), + /* allowSymlinkTargetAbsolute= */ false, + /* onInputFiles= */ file -> {}, + /* onInputDirectories= */ directory -> {}, + /* onInputDigests= */ digest -> {}, preconditionFailure); checkPreconditionFailure( @@ -315,7 +320,7 @@ public void duplicateEmptyDirectoryCheckPasses() throws StatusException { @Test public void unsortedFileInputIsInvalid() { PreconditionFailure.Builder preconditionFailure = PreconditionFailure.newBuilder(); - AbstractServerInstance.validateActionInputDirectory( + NodeInstance.validateActionInputDirectory( 
ACTION_INPUT_ROOT_DIRECTORY_PATH, Directory.newBuilder() .addAllFiles( @@ -323,12 +328,13 @@ public void unsortedFileInputIsInvalid() { FileNode.newBuilder().setName("foo").build(), FileNode.newBuilder().setName("bar").build())) .build(), - /* pathDigests=*/ new Stack<>(), - /* visited=*/ Sets.newHashSet(), - /* directoriesIndex=*/ Maps.newHashMap(), - /* onInputFiles=*/ file -> {}, - /* onInputDirectories=*/ directory -> {}, - /* onInputDigests=*/ digest -> {}, + /* pathDigests= */ new Stack<>(), + /* visited= */ Sets.newHashSet(), + /* directoriesIndex= */ Maps.newHashMap(), + /* allowSymlinkTargetAbsolute= */ false, + /* onInputFiles= */ file -> {}, + /* onInputDirectories= */ directory -> {}, + /* onInputDigests= */ digest -> {}, preconditionFailure); assertThat(preconditionFailure.getViolationsCount()).isEqualTo(1); @@ -343,7 +349,7 @@ public void duplicateDirectoryInputIsInvalid() { Directory emptyDirectory = Directory.getDefaultInstance(); Digest emptyDirectoryDigest = DIGEST_UTIL.compute(emptyDirectory); PreconditionFailure.Builder preconditionFailure = PreconditionFailure.newBuilder(); - AbstractServerInstance.validateActionInputDirectory( + NodeInstance.validateActionInputDirectory( ACTION_INPUT_ROOT_DIRECTORY_PATH, Directory.newBuilder() .addAllDirectories( @@ -357,15 +363,15 @@ public void duplicateDirectoryInputIsInvalid() { .setDigest(emptyDirectoryDigest) .build())) .build(), - /* pathDigests=*/ new Stack<>(), - /* visited=*/ Sets.newHashSet(), - /* directoriesIndex=*/ ImmutableMap.of(emptyDirectoryDigest, emptyDirectory), - /* onInputFiles=*/ file -> {}, - /* onInputDirectories=*/ directory -> {}, - /* onInputDigests=*/ digest -> {}, + /* pathDigests= */ new Stack<>(), + /* visited= */ Sets.newHashSet(), + /* directoriesIndex= */ ImmutableMap.of(emptyDirectoryDigest, emptyDirectory), + /* allowSymlinkTargetAbsolute= */ false, + /* onInputFiles= */ file -> {}, + /* onInputDirectories= */ directory -> {}, + /* onInputDigests= */ digest -> {}, preconditionFailure); - assertThat(preconditionFailure.getViolationsCount()).isEqualTo(1); assertThat(preconditionFailure.getViolationsCount()).isEqualTo(1); Violation violation = preconditionFailure.getViolationsList().get(0); assertThat(violation.getType()).isEqualTo(VIOLATION_TYPE_INVALID); @@ -378,7 +384,7 @@ public void unsortedDirectoryInputIsInvalid() { Directory emptyDirectory = Directory.getDefaultInstance(); Digest emptyDirectoryDigest = DIGEST_UTIL.compute(emptyDirectory); PreconditionFailure.Builder preconditionFailure = PreconditionFailure.newBuilder(); - AbstractServerInstance.validateActionInputDirectory( + NodeInstance.validateActionInputDirectory( ACTION_INPUT_ROOT_DIRECTORY_PATH, Directory.newBuilder() .addAllDirectories( @@ -392,12 +398,13 @@ public void unsortedDirectoryInputIsInvalid() { .setDigest(emptyDirectoryDigest) .build())) .build(), - /* pathDigests=*/ new Stack<>(), - /* visited=*/ Sets.newHashSet(), - /* directoriesIndex=*/ ImmutableMap.of(emptyDirectoryDigest, emptyDirectory), - /* onInputFiles=*/ file -> {}, - /* onInputDirectories=*/ directory -> {}, - /* onInputDigests=*/ digest -> {}, + /* pathDigests= */ new Stack<>(), + /* visited= */ Sets.newHashSet(), + /* directoriesIndex= */ ImmutableMap.of(emptyDirectoryDigest, emptyDirectory), + /* allowSymlinkTargetAbsolute= */ false, + /* onInputFiles= */ file -> {}, + /* onInputDirectories= */ directory -> {}, + /* onInputDigests= */ digest -> {}, preconditionFailure); assertThat(preconditionFailure.getViolationsCount()).isEqualTo(1); @@ -407,10 +414,52 @@ 
public void unsortedDirectoryInputIsInvalid() { assertThat(violation.getDescription()).isEqualTo(DIRECTORY_NOT_SORTED); } + @Test + public void shouldValidateIfSymlinkTargetAbsolute() { + // invalid for disallowed + PreconditionFailure.Builder preconditionFailure = PreconditionFailure.newBuilder(); + Directory absoluteSymlinkDirectory = + Directory.newBuilder() + .addSymlinks(SymlinkNode.newBuilder().setName("foo").setTarget("/root/secret").build()) + .build(); + NodeInstance.validateActionInputDirectory( + ACTION_INPUT_ROOT_DIRECTORY_PATH, + absoluteSymlinkDirectory, + /* pathDigests= */ new Stack<>(), + /* visited= */ Sets.newHashSet(), + /* directoriesIndex= */ Maps.newHashMap(), + /* allowSymlinkTargetAbsolute= */ false, + /* onInputFile= */ file -> {}, + /* onInputDirectorie= */ directory -> {}, + /* onInputDigest= */ digest -> {}, + preconditionFailure); + + assertThat(preconditionFailure.getViolationsCount()).isEqualTo(1); + Violation violation = preconditionFailure.getViolationsList().get(0); + assertThat(violation.getType()).isEqualTo(VIOLATION_TYPE_INVALID); + assertThat(violation.getSubject()).isEqualTo("/: foo -> /root/secret"); + assertThat(violation.getDescription()).isEqualTo(SYMLINK_TARGET_ABSOLUTE); + + // valid for allowed + preconditionFailure = PreconditionFailure.newBuilder(); + NodeInstance.validateActionInputDirectory( + ACTION_INPUT_ROOT_DIRECTORY_PATH, + absoluteSymlinkDirectory, + /* pathDigests= */ new Stack<>(), + /* visited= */ Sets.newHashSet(), + /* directoriesIndex= */ Maps.newHashMap(), + /* allowSymlinkTargetAbsolute= */ true, + /* onInputFile= */ file -> {}, + /* onInputDirectorie= */ directory -> {}, + /* onInputDigest= */ digest -> {}, + preconditionFailure); + assertThat(preconditionFailure.getViolationsCount()).isEqualTo(0); + } + @Test public void nestedOutputDirectoriesAreInvalid() { PreconditionFailure.Builder preconditionFailureBuilder = PreconditionFailure.newBuilder(); - AbstractServerInstance.validateOutputs( + NodeInstance.validateOutputs( ImmutableSet.of(), ImmutableSet.of(), ImmutableSet.of(), @@ -427,7 +476,7 @@ public void nestedOutputDirectoriesAreInvalid() { @Test public void outputDirectoriesContainingOutputFilesAreInvalid() { PreconditionFailure.Builder preconditionFailureBuilder = PreconditionFailure.newBuilder(); - AbstractServerInstance.validateOutputs( + NodeInstance.validateOutputs( ImmutableSet.of(), ImmutableSet.of(), ImmutableSet.of("foo/bar"), @@ -444,7 +493,7 @@ public void outputDirectoriesContainingOutputFilesAreInvalid() { @Test public void outputFilesAsOutputDirectoryAncestorsAreInvalid() { PreconditionFailure.Builder preconditionFailureBuilder = PreconditionFailure.newBuilder(); - AbstractServerInstance.validateOutputs( + NodeInstance.validateOutputs( ImmutableSet.of(), ImmutableSet.of(), ImmutableSet.of("foo"), @@ -460,7 +509,7 @@ public void outputFilesAsOutputDirectoryAncestorsAreInvalid() { @Test public void emptyArgumentListIsInvalid() { - AbstractServerInstance instance = new DummyServerInstance(); + NodeInstance instance = new DummyServerInstance(); PreconditionFailure.Builder preconditionFailureBuilder = PreconditionFailure.newBuilder(); instance.validateCommand( @@ -480,7 +529,7 @@ public void emptyArgumentListIsInvalid() { @Test public void absoluteWorkingDirectoryIsInvalid() { - AbstractServerInstance instance = new DummyServerInstance(); + NodeInstance instance = new DummyServerInstance(); PreconditionFailure.Builder preconditionFailureBuilder = PreconditionFailure.newBuilder(); instance.validateCommand( @@ 
-500,7 +549,7 @@ public void absoluteWorkingDirectoryIsInvalid() { @Test public void undeclaredWorkingDirectoryIsInvalid() { - AbstractServerInstance instance = new DummyServerInstance(); + NodeInstance instance = new DummyServerInstance(); Digest inputRootDigest = DIGEST_UTIL.compute(Directory.getDefaultInstance()); PreconditionFailure.Builder preconditionFailureBuilder = PreconditionFailure.newBuilder(); @@ -519,6 +568,115 @@ public void undeclaredWorkingDirectoryIsInvalid() { assertThat(violation.getDescription()).isEqualTo("working directory is not an input directory"); } + /*- + * / -> valid dir + * bar/ -> missing dir with digest 'missing' and non-zero size + * foo/ -> missing dir with digest 'missing' and non-zero size + */ + @Test + public void multipleIdenticalDirectoryMissingAreAllPreconditionFailures() { + Digest missingDirectoryDigest = Digest.newBuilder().setHash("missing").setSizeBytes(1).build(); + PreconditionFailure.Builder preconditionFailure = PreconditionFailure.newBuilder(); + Directory root = + Directory.newBuilder() + .addAllDirectories( + ImmutableList.of( + DirectoryNode.newBuilder() + .setName("bar") + .setDigest(missingDirectoryDigest) + .build(), + DirectoryNode.newBuilder() + .setName("foo") + .setDigest(missingDirectoryDigest) + .build())) + .build(); + NodeInstance.validateActionInputDirectory( + ACTION_INPUT_ROOT_DIRECTORY_PATH, + root, + /* pathDigests= */ new Stack<>(), + /* visited= */ Sets.newHashSet(), + /* directoriesIndex= */ ImmutableMap.of(), + /* allowSymlinkTargetAbsolute= */ false, + /* onInputFiles= */ file -> {}, + /* onInputDirectories= */ directory -> {}, + /* onInputDigests= */ digest -> {}, + preconditionFailure); + + String missingSubject = "blobs/" + DigestUtil.toString(missingDirectoryDigest); + String missingFmt = "The directory `/%s` was not found in the CAS."; + assertThat(preconditionFailure.getViolationsCount()).isEqualTo(2); + Violation violation = preconditionFailure.getViolationsList().get(0); + assertThat(violation.getType()).isEqualTo(VIOLATION_TYPE_MISSING); + assertThat(violation.getSubject()).isEqualTo(missingSubject); + assertThat(violation.getDescription()).isEqualTo(String.format(missingFmt, "bar")); + violation = preconditionFailure.getViolationsList().get(1); + assertThat(violation.getType()).isEqualTo(VIOLATION_TYPE_MISSING); + assertThat(violation.getSubject()).isEqualTo(missingSubject); + assertThat(violation.getDescription()).isEqualTo(String.format(missingFmt, "foo")); + } + + /*- + * / -> valid dir + * bar/ -> valid dir + * baz/ -> missing dir with digest 'missing-empty' and zero size + * quux/ -> missing dir with digest 'missing' and non-zero size + * foo/ -> valid dir with digest from /bar/, making it a copy of above + * + * Only duplicated-bar appears in the index + * Empty directory needs short circuit in all cases + * Result should be 2 missing directory paths, no errors + */ + @Test + public void validationRevisitReplicatesPreconditionFailures() { + Digest missingEmptyDirectoryDigest = Digest.newBuilder().setHash("missing-empty").build(); + Digest missingDirectoryDigest = Digest.newBuilder().setHash("missing").setSizeBytes(1).build(); + Directory foo = + Directory.newBuilder() + .addAllDirectories( + ImmutableList.of( + DirectoryNode.newBuilder() + .setName("baz") + .setDigest(missingEmptyDirectoryDigest) + .build(), + DirectoryNode.newBuilder() + .setName("quux") + .setDigest(missingDirectoryDigest) + .build())) + .build(); + Digest fooDigest = DIGEST_UTIL.compute(foo); + PreconditionFailure.Builder 
preconditionFailure = PreconditionFailure.newBuilder(); + Directory root = + Directory.newBuilder() + .addAllDirectories( + ImmutableList.of( + DirectoryNode.newBuilder().setName("bar").setDigest(fooDigest).build(), + DirectoryNode.newBuilder().setName("foo").setDigest(fooDigest).build())) + .build(); + NodeInstance.validateActionInputDirectory( + ACTION_INPUT_ROOT_DIRECTORY_PATH, + root, + /* pathDigests= */ new Stack<>(), + /* visited= */ Sets.newHashSet(), + /* directoriesIndex= */ ImmutableMap.of(fooDigest, foo), + /* allowSymlinkTargetAbsolute= */ false, + /* onInputFiles= */ file -> {}, + /* onInputDirectories= */ directory -> {}, + /* onInputDigests= */ digest -> {}, + preconditionFailure); + + String missingSubject = "blobs/" + DigestUtil.toString(missingDirectoryDigest); + String missingFmt = "The directory `/%s` was not found in the CAS."; + assertThat(preconditionFailure.getViolationsCount()).isEqualTo(2); + Violation violation = preconditionFailure.getViolationsList().get(0); + assertThat(violation.getType()).isEqualTo(VIOLATION_TYPE_MISSING); + assertThat(violation.getSubject()).isEqualTo(missingSubject); + assertThat(violation.getDescription()).isEqualTo(String.format(missingFmt, "bar/quux")); + violation = preconditionFailure.getViolationsList().get(1); + assertThat(violation.getType()).isEqualTo(VIOLATION_TYPE_MISSING); + assertThat(violation.getSubject()).isEqualTo(missingSubject); + assertThat(violation.getDescription()).isEqualTo(String.format(missingFmt, "foo/quux")); + } + @SuppressWarnings("unchecked") private static void doBlob( ContentAddressableStorage contentAddressableStorage, @@ -538,7 +696,7 @@ private static void doBlob( .get( eq(Compressor.Value.IDENTITY), eq(digest), - /* offset=*/ eq(0L), + /* offset= */ eq(0L), eq(digest.getSizeBytes()), any(ServerCallStreamObserver.class), eq(requestMetadata)); @@ -563,8 +721,7 @@ public void outputDirectoriesFilesAreEnsuredPresent() throws Exception { .build(); ContentAddressableStorage contentAddressableStorage = mock(ContentAddressableStorage.class); ActionCache actionCache = mock(ActionCache.class); - AbstractServerInstance instance = - new DummyServerInstance(contentAddressableStorage, actionCache); + NodeInstance instance = new DummyServerInstance(contentAddressableStorage, actionCache); Tree tree = Tree.newBuilder() @@ -608,7 +765,7 @@ public void outputDirectoriesFilesAreEnsuredPresent() throws Exception { .get( eq(Compressor.Value.IDENTITY), eq(treeDigest), - /* offset=*/ eq(0L), + /* offset= */ eq(0L), eq(treeDigest.getSizeBytes()), any(ServerCallStreamObserver.class), eq(requestMetadata)); @@ -624,7 +781,7 @@ public void fetchBlobWriteCompleteIsSuccess() throws Exception { Digest expectedDigest = contentDigest.toBuilder().setSizeBytes(-1).build(); ContentAddressableStorage contentAddressableStorage = mock(ContentAddressableStorage.class); - AbstractServerInstance instance = new DummyServerInstance(contentAddressableStorage, null); + NodeInstance instance = new DummyServerInstance(contentAddressableStorage, null); RequestMetadata requestMetadata = RequestMetadata.getDefaultInstance(); Write write = mock(Write.class); diff --git a/src/test/java/build/buildfarm/instance/shard/BUILD b/src/test/java/build/buildfarm/instance/shard/BUILD index 6a501a1b68..dc22e29237 100644 --- a/src/test/java/build/buildfarm/instance/shard/BUILD +++ b/src/test/java/build/buildfarm/instance/shard/BUILD @@ -1,7 +1,191 @@ java_test( - name = "tests", + name = "DispatchedMonitorTest", size = "small", - srcs = glob(["*.java"]), + srcs = [ 
+ "DispatchedMonitorTest.java", + "UnobservableWatcher.java", + ], + data = ["//examples:example_configs"], + test_class = "build.buildfarm.AllTests", + deps = [ + "//src/main/java/build/buildfarm/actioncache", + "//src/main/java/build/buildfarm/backplane", + "//src/main/java/build/buildfarm/common", + "//src/main/java/build/buildfarm/common/config", + "//src/main/java/build/buildfarm/instance", + "//src/main/java/build/buildfarm/instance/server", + "//src/main/java/build/buildfarm/instance/shard", + "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", + "//src/test/java/build/buildfarm:test_runner", + "@com_google_googleapis//google/longrunning:longrunning_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", + "@maven//:com_github_ben_manes_caffeine_caffeine", + "@maven//:com_google_guava_guava", + "@maven//:com_google_protobuf_protobuf_java", + "@maven//:com_google_protobuf_protobuf_java_util", + "@maven//:com_google_truth_truth", + "@maven//:io_grpc_grpc_api", + "@maven//:io_grpc_grpc_core", + "@maven//:io_grpc_grpc_protobuf", + "@maven//:io_grpc_grpc_stub", + "@maven//:org_mockito_mockito_core", + "@maven//:redis_clients_jedis", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", + ], +) + +java_test( + name = "RedisShardBackplaneTest", + size = "small", + srcs = [ + "RedisShardBackplaneTest.java", + "UnobservableWatcher.java", + ], + data = ["//examples:example_configs"], + test_class = "build.buildfarm.AllTests", + deps = [ + "//src/main/java/build/buildfarm/actioncache", + "//src/main/java/build/buildfarm/backplane", + "//src/main/java/build/buildfarm/common", + "//src/main/java/build/buildfarm/common/config", + "//src/main/java/build/buildfarm/common/redis", + "//src/main/java/build/buildfarm/instance", + "//src/main/java/build/buildfarm/instance/server", + "//src/main/java/build/buildfarm/instance/shard", + "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", + "//src/test/java/build/buildfarm:test_runner", + "@com_google_googleapis//google/longrunning:longrunning_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", + "@maven//:com_github_ben_manes_caffeine_caffeine", + "@maven//:com_google_guava_guava", + "@maven//:com_google_protobuf_protobuf_java", + "@maven//:com_google_protobuf_protobuf_java_util", + "@maven//:com_google_truth_truth", + "@maven//:io_grpc_grpc_api", + "@maven//:io_grpc_grpc_core", + "@maven//:io_grpc_grpc_protobuf", + "@maven//:io_grpc_grpc_stub", + "@maven//:org_mockito_mockito_core", + "@maven//:redis_clients_jedis", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", + ], +) + +java_test( + name = "RedisShardSubscriberTest", + size = "small", + srcs = [ + "RedisShardSubscriberTest.java", + "UnobservableWatcher.java", + ], + data = ["//examples:example_configs"], + test_class = "build.buildfarm.AllTests", + deps = [ + "//src/main/java/build/buildfarm/actioncache", + "//src/main/java/build/buildfarm/backplane", + "//src/main/java/build/buildfarm/common", + "//src/main/java/build/buildfarm/common/config", + "//src/main/java/build/buildfarm/instance", + "//src/main/java/build/buildfarm/instance/server", + "//src/main/java/build/buildfarm/instance/shard", + "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", + "//src/test/java/build/buildfarm:test_runner", + "@com_google_googleapis//google/longrunning:longrunning_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", + "@maven//:com_github_ben_manes_caffeine_caffeine", + 
"@maven//:com_google_guava_guava", + "@maven//:com_google_protobuf_protobuf_java", + "@maven//:com_google_protobuf_protobuf_java_util", + "@maven//:com_google_truth_truth", + "@maven//:io_grpc_grpc_api", + "@maven//:io_grpc_grpc_core", + "@maven//:io_grpc_grpc_protobuf", + "@maven//:io_grpc_grpc_stub", + "@maven//:org_mockito_mockito_core", + "@maven//:redis_clients_jedis", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", + ], +) + +java_test( + name = "ServerInstanceTest", + size = "small", + srcs = [ + "ServerInstanceTest.java", + "UnobservableWatcher.java", + ], + data = ["//examples:example_configs"], + test_class = "build.buildfarm.AllTests", + deps = [ + "//src/main/java/build/buildfarm/actioncache", + "//src/main/java/build/buildfarm/backplane", + "//src/main/java/build/buildfarm/common", + "//src/main/java/build/buildfarm/common/config", + "//src/main/java/build/buildfarm/instance", + "//src/main/java/build/buildfarm/instance/server", + "//src/main/java/build/buildfarm/instance/shard", + "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", + "//src/test/java/build/buildfarm:test_runner", + "@com_google_googleapis//google/longrunning:longrunning_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", + "@maven//:com_github_ben_manes_caffeine_caffeine", + "@maven//:com_google_guava_guava", + "@maven//:com_google_protobuf_protobuf_java", + "@maven//:com_google_protobuf_protobuf_java_util", + "@maven//:com_google_truth_truth", + "@maven//:io_grpc_grpc_api", + "@maven//:io_grpc_grpc_core", + "@maven//:io_grpc_grpc_protobuf", + "@maven//:io_grpc_grpc_stub", + "@maven//:org_mockito_mockito_core", + "@maven//:redis_clients_jedis", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", + ], +) + +java_test( + name = "TimedWatcherTest", + size = "small", + srcs = [ + "TimedWatcherTest.java", + "UnobservableWatcher.java", + ], + data = ["//examples:example_configs"], + test_class = "build.buildfarm.AllTests", + deps = [ + "//src/main/java/build/buildfarm/actioncache", + "//src/main/java/build/buildfarm/backplane", + "//src/main/java/build/buildfarm/common", + "//src/main/java/build/buildfarm/common/config", + "//src/main/java/build/buildfarm/instance", + "//src/main/java/build/buildfarm/instance/server", + "//src/main/java/build/buildfarm/instance/shard", + "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", + "//src/test/java/build/buildfarm:test_runner", + "@com_google_googleapis//google/longrunning:longrunning_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", + "@maven//:com_github_ben_manes_caffeine_caffeine", + "@maven//:com_google_guava_guava", + "@maven//:com_google_protobuf_protobuf_java", + "@maven//:com_google_protobuf_protobuf_java_util", + "@maven//:com_google_truth_truth", + "@maven//:io_grpc_grpc_api", + "@maven//:io_grpc_grpc_core", + "@maven//:io_grpc_grpc_protobuf", + "@maven//:io_grpc_grpc_stub", + "@maven//:org_mockito_mockito_core", + "@maven//:redis_clients_jedis", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", + ], +) + +java_test( + name = "UtilTest", + size = "small", + srcs = [ + "UnobservableWatcher.java", + "UtilTest.java", + ], data = ["//examples:example_configs"], test_class = "build.buildfarm.AllTests", deps = [ @@ -14,10 +198,8 @@ java_test( "//src/main/java/build/buildfarm/instance/shard", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", "//src/test/java/build/buildfarm:test_runner", - 
"//third_party/jedis", - "@googleapis//:google_longrunning_operations_java_proto", - "@googleapis//:google_rpc_code_java_proto", - "@googleapis//:google_rpc_error_details_java_proto", + "@com_google_googleapis//google/longrunning:longrunning_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_github_ben_manes_caffeine_caffeine", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", @@ -28,6 +210,26 @@ java_test( "@maven//:io_grpc_grpc_protobuf", "@maven//:io_grpc_grpc_stub", "@maven//:org_mockito_mockito_core", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@maven//:redis_clients_jedis", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", + ], +) + +java_test( + name = "JedisCasWorkerMapTest", + size = "small", + srcs = [ + "JedisCasWorkerMapTest.java", + ], + test_class = "build.buildfarm.AllTests", + deps = [ + "//src/main/java/build/buildfarm/common", + "//src/main/java/build/buildfarm/common/redis", + "//src/main/java/build/buildfarm/instance/shard", + "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", + "//src/test/java/build/buildfarm:test_runner", + "@maven//:com_github_fppt_jedis_mock", + "@maven//:com_google_truth_truth", + "@maven//:redis_clients_jedis", ], ) diff --git a/src/test/java/build/buildfarm/instance/shard/DispatchedMonitorTest.java b/src/test/java/build/buildfarm/instance/shard/DispatchedMonitorTest.java index c1410739b6..777f78a2e3 100644 --- a/src/test/java/build/buildfarm/instance/shard/DispatchedMonitorTest.java +++ b/src/test/java/build/buildfarm/instance/shard/DispatchedMonitorTest.java @@ -22,7 +22,7 @@ import static org.mockito.Mockito.eq; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.verifyNoInteractions; import static org.mockito.Mockito.when; import build.buildfarm.backplane.Backplane; @@ -58,7 +58,7 @@ public void setUp() throws InterruptedException, IOException { MockitoAnnotations.initMocks(this); when(requeuer.apply(any(QueueEntry.class), any(Duration.class))) .thenReturn(immediateFailedFuture(new RuntimeException("unexpected requeue"))); - dispatchedMonitor = new DispatchedMonitor(backplane, requeuer, /* intervalSeconds=*/ 0); + dispatchedMonitor = new DispatchedMonitor(backplane, requeuer, /* intervalSeconds= */ 0); } @Test @@ -67,7 +67,7 @@ public void shouldStopWhenBackplanIsStopped() { dispatchedMonitor.run(); verify(backplane, atLeastOnce()).isStopped(); - verifyZeroInteractions(requeuer); + verifyNoInteractions(requeuer); } @Test @@ -78,7 +78,7 @@ public void shouldIgnoreOperationWithFutureRequeueAt() throws Exception { ImmutableList.of( DispatchedOperation.newBuilder().setRequeueAt(Long.MAX_VALUE).build())); dispatchedMonitor.iterate(); - verifyZeroInteractions(requeuer); + verifyNoInteractions(requeuer); } @Test @@ -123,7 +123,7 @@ public void shouldIgnoreOperationWithEarlyRequeueAtWhenBackplaneDisallowsQueuein .build())); when(requeuer.apply(eq(queueEntry), any(Duration.class))).thenReturn(immediateFuture(null)); dispatchedMonitor.iterate(); - verifyZeroInteractions(requeuer); + verifyNoInteractions(requeuer); } @Test @@ -132,7 +132,7 @@ public void shouldIgnoreBackplaneException() throws Exception { when(backplane.getDispatchedOperations()) .thenThrow(new IOException("transient error condition")); dispatchedMonitor.iterate(); - verifyZeroInteractions(requeuer); + 
verifyNoInteractions(requeuer); } @Test diff --git a/src/test/java/build/buildfarm/instance/shard/JedisCasWorkerMapTest.java b/src/test/java/build/buildfarm/instance/shard/JedisCasWorkerMapTest.java new file mode 100644 index 0000000000..7b666f470d --- /dev/null +++ b/src/test/java/build/buildfarm/instance/shard/JedisCasWorkerMapTest.java @@ -0,0 +1,63 @@ +package build.buildfarm.instance.shard; + +import static com.google.common.truth.Truth.assertThat; + +import build.bazel.remote.execution.v2.Digest; +import build.buildfarm.common.DigestUtil; +import build.buildfarm.common.redis.RedisClient; +import com.github.fppt.jedismock.RedisServer; +import com.github.fppt.jedismock.server.ServiceOptions; +import java.io.IOException; +import java.net.InetAddress; +import java.util.Arrays; +import java.util.Collections; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import redis.clients.jedis.HostAndPort; +import redis.clients.jedis.JedisCluster; + +@RunWith(JUnit4.class) +public class JedisCasWorkerMapTest { + private static final String CAS_PREFIX = "ContentAddressableStorage"; + + private RedisServer redisServer; + private RedisClient redisClient; + private JedisCasWorkerMap jedisCasWorkerMap; + + @Before + public void setup() throws IOException { + redisServer = + RedisServer.newRedisServer(0, InetAddress.getByName("localhost")) + .setOptions(ServiceOptions.defaultOptions().withClusterModeEnabled()) + .start(); + redisClient = + new RedisClient( + new JedisCluster( + Collections.singleton( + new HostAndPort(redisServer.getHost(), redisServer.getBindPort())))); + jedisCasWorkerMap = new JedisCasWorkerMap(CAS_PREFIX, 60); + } + + @Test + public void testSetExpire() throws IOException { + Digest testDigest1 = Digest.newBuilder().setHash("abc").build(); + Digest testDigest2 = Digest.newBuilder().setHash("xyz").build(); + + String casKey1 = CAS_PREFIX + ":" + DigestUtil.toString(testDigest1); + String casKey2 = CAS_PREFIX + ":" + DigestUtil.toString(testDigest2); + + redisClient.run(jedis -> jedis.sadd(casKey1, "worker1")); + jedisCasWorkerMap.setExpire(redisClient, Arrays.asList(testDigest1, testDigest2)); + + assertThat((Long) redisClient.call(jedis -> jedis.ttl(casKey1))).isGreaterThan(0L); + assertThat((Long) redisClient.call(jedis -> jedis.ttl(casKey2))).isEqualTo(-2L); + } + + @After + public void tearDown() throws IOException { + redisServer.stop(); + } +} diff --git a/src/test/java/build/buildfarm/instance/shard/RedisShardBackplaneTest.java b/src/test/java/build/buildfarm/instance/shard/RedisShardBackplaneTest.java index a893540fd1..d6812ed259 100644 --- a/src/test/java/build/buildfarm/instance/shard/RedisShardBackplaneTest.java +++ b/src/test/java/build/buildfarm/instance/shard/RedisShardBackplaneTest.java @@ -16,28 +16,39 @@ import static build.buildfarm.instance.shard.RedisShardBackplane.parseOperationChange; import static com.google.common.truth.Truth.assertThat; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.any; import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.when; -import build.bazel.remote.execution.v2.Platform; +import build.bazel.remote.execution.v2.Digest; import build.bazel.remote.execution.v2.RequestMetadata; import 
build.buildfarm.common.config.BuildfarmConfigs; import build.buildfarm.common.config.Queue; +import build.buildfarm.common.redis.BalancedRedisQueue; +import build.buildfarm.common.redis.RedisClient; +import build.buildfarm.common.redis.RedisHashMap; +import build.buildfarm.common.redis.RedisMap; import build.buildfarm.v1test.DispatchedOperation; import build.buildfarm.v1test.ExecuteEntry; import build.buildfarm.v1test.OperationChange; import build.buildfarm.v1test.QueueEntry; +import build.buildfarm.v1test.ShardWorker; import build.buildfarm.v1test.WorkerChange; +import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; import com.google.longrunning.Operation; import com.google.protobuf.util.JsonFormat; import java.io.IOException; -import java.util.ArrayList; +import java.time.Instant; import java.util.List; +import java.util.Map; +import java.util.Set; import java.util.UUID; import java.util.function.Supplier; import org.junit.Before; @@ -48,42 +59,46 @@ import org.mockito.ArgumentCaptor; import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import redis.clients.jedis.JedisCluster; +import redis.clients.jedis.UnifiedJedis; @RunWith(JUnit4.class) public class RedisShardBackplaneTest { - private RedisShardBackplane backplane; private BuildfarmConfigs configs = BuildfarmConfigs.getInstance(); - @Mock Supplier mockJedisClusterFactory; + @Mock Supplier mockJedisClusterFactory; @Before public void setUp() throws IOException { configs.getBackplane().setOperationExpire(10); - configs.getBackplane().setSubscribeToBackplane(false); - configs.getBackplane().setRunFailsafeOperation(false); configs.getBackplane().setQueues(new Queue[] {}); MockitoAnnotations.initMocks(this); } + public RedisShardBackplane createBackplane(String name) { + return new RedisShardBackplane( + name, + /* subscribeToBackplane= */ false, + /* runFailsafeOperation= */ false, + o -> o, + o -> o, + mockJedisClusterFactory); + } + @Test public void workersWithInvalidProtobufAreRemoved() throws IOException { - JedisCluster jedisCluster = mock(JedisCluster.class); - when(mockJedisClusterFactory.get()).thenReturn(jedisCluster); - when(jedisCluster.hgetAll(configs.getBackplane().getWorkersHashName() + "_storage")) + UnifiedJedis jedis = mock(UnifiedJedis.class); + when(mockJedisClusterFactory.get()).thenReturn(jedis); + when(jedis.hgetAll(configs.getBackplane().getWorkersHashName() + "_storage")) .thenReturn(ImmutableMap.of("foo", "foo")); - when(jedisCluster.hdel(configs.getBackplane().getWorkersHashName() + "_storage", "foo")) + when(jedis.hdel(configs.getBackplane().getWorkersHashName() + "_storage", "foo")) .thenReturn(1L); - backplane = - new RedisShardBackplane( - "invalid-protobuf-worker-removed-test", (o) -> o, (o) -> o, mockJedisClusterFactory); + RedisShardBackplane backplane = createBackplane("invalid-protobuf-worker-removed-test"); backplane.start("startTime/test:0000"); assertThat(backplane.getStorageWorkers()).isEmpty(); - verify(jedisCluster, times(1)) - .hdel(configs.getBackplane().getWorkersHashName() + "_storage", "foo"); + verify(jedis, times(1)).hdel(configs.getBackplane().getWorkersHashName() + "_storage", "foo"); ArgumentCaptor changeCaptor = ArgumentCaptor.forClass(String.class); - verify(jedisCluster, times(1)) + verify(jedis, times(1)) .publish(eq(configs.getBackplane().getWorkerChannel()), changeCaptor.capture()); String json = changeCaptor.getValue(); WorkerChange.Builder builder = WorkerChange.newBuilder(); @@ 
-93,12 +108,10 @@ public void workersWithInvalidProtobufAreRemoved() throws IOException { assertThat(workerChange.getTypeCase()).isEqualTo(WorkerChange.TypeCase.REMOVE); } - void verifyChangePublished(JedisCluster jedis) throws IOException { + OperationChange verifyChangePublished(String channel, UnifiedJedis jedis) throws IOException { ArgumentCaptor changeCaptor = ArgumentCaptor.forClass(String.class); - verify(jedis, times(1)).publish(eq(backplane.operationChannel("op")), changeCaptor.capture()); - OperationChange opChange = parseOperationChange(changeCaptor.getValue()); - assertThat(opChange.hasReset()).isTrue(); - assertThat(opChange.getReset().getOperation().getName()).isEqualTo("op"); + verify(jedis, times(1)).publish(eq(channel), changeCaptor.capture()); + return parseOperationChange(changeCaptor.getValue()); } String operationName(String name) { @@ -107,74 +120,76 @@ String operationName(String name) { @Test public void prequeueUpdatesOperationPrequeuesAndPublishes() throws IOException { - JedisCluster jedisCluster = mock(JedisCluster.class); - when(mockJedisClusterFactory.get()).thenReturn(jedisCluster); - backplane = - new RedisShardBackplane( - "prequeue-operation-test", (o) -> o, (o) -> o, mockJedisClusterFactory); - backplane.start("startTime/test:0000"); + UnifiedJedis jedis = mock(UnifiedJedis.class); + RedisClient client = new RedisClient(jedis); + DistributedState state = new DistributedState(); + state.operations = mock(Operations.class); + state.prequeue = mock(BalancedRedisQueue.class); + RedisShardBackplane backplane = createBackplane("prequeue-operation-test"); + backplane.start(client, state, "startTime/test:0000"); final String opName = "op"; ExecuteEntry executeEntry = ExecuteEntry.newBuilder().setOperationName(opName).build(); Operation op = Operation.newBuilder().setName(opName).build(); backplane.prequeue(executeEntry, op); - verify(mockJedisClusterFactory, times(1)).get(); - verify(jedisCluster, times(1)) - .setex( - operationName(opName), - configs.getBackplane().getOperationExpire(), - RedisShardBackplane.operationPrinter.print(op)); - verify(jedisCluster, times(1)) - .lpush( - configs.getBackplane().getPreQueuedOperationsListName(), - JsonFormat.printer().print(executeEntry)); - verifyChangePublished(jedisCluster); + verify(state.operations, times(1)) + .insert( + eq(jedis), + any(String.class), + eq(opName), + eq(RedisShardBackplane.operationPrinter.print(op))); + verifyNoMoreInteractions(state.operations); + OperationChange opChange = verifyChangePublished(backplane.operationChannel(opName), jedis); + assertThat(opChange.hasReset()).isTrue(); + assertThat(opChange.getReset().getOperation().getName()).isEqualTo(opName); } @Test public void queuingPublishes() throws IOException { - JedisCluster jedisCluster = mock(JedisCluster.class); - when(mockJedisClusterFactory.get()).thenReturn(jedisCluster); - backplane = - new RedisShardBackplane( - "requeue-operation-test", (o) -> o, (o) -> o, mockJedisClusterFactory); + UnifiedJedis jedis = mock(UnifiedJedis.class); + when(mockJedisClusterFactory.get()).thenReturn(jedis); + RedisShardBackplane backplane = createBackplane("requeue-operation-test"); backplane.start("startTime/test:0000"); final String opName = "op"; backplane.queueing(opName); verify(mockJedisClusterFactory, times(1)).get(); - verifyChangePublished(jedisCluster); + OperationChange opChange = verifyChangePublished(backplane.operationChannel(opName), jedis); + assertThat(opChange.hasReset()).isTrue(); + 
assertThat(opChange.getReset().getOperation().getName()).isEqualTo(opName); } @Test public void requeueDispatchedOperationQueuesAndPublishes() throws IOException { - JedisCluster jedisCluster = mock(JedisCluster.class); - when(mockJedisClusterFactory.get()).thenReturn(jedisCluster); - backplane = - new RedisShardBackplane( - "requeue-operation-test", (o) -> o, (o) -> o, mockJedisClusterFactory); - backplane.start("startTime/test:0000"); + UnifiedJedis jedis = mock(UnifiedJedis.class); + RedisClient client = new RedisClient(jedis); + DistributedState state = new DistributedState(); + state.dispatchedOperations = mock(RedisHashMap.class); + state.operationQueue = mock(OperationQueue.class); + RedisShardBackplane backplane = createBackplane("requeue-operation-test"); + backplane.start(client, state, "startTime/test:0000"); final String opName = "op"; - when(jedisCluster.hdel(configs.getBackplane().getDispatchedOperationsHashName(), opName)) - .thenReturn(1L); - QueueEntry queueEntry = QueueEntry.newBuilder() - .setExecuteEntry(ExecuteEntry.newBuilder().setOperationName("op").build()) + .setExecuteEntry(ExecuteEntry.newBuilder().setOperationName(opName).build()) .build(); backplane.requeueDispatchedOperation(queueEntry); - verify(mockJedisClusterFactory, times(1)).get(); - verify(jedisCluster, times(1)) - .hdel(configs.getBackplane().getDispatchedOperationsHashName(), opName); - verify(jedisCluster, times(1)) - .lpush( - configs.getBackplane().getQueuedOperationsListName(), - JsonFormat.printer().print(queueEntry)); - verifyChangePublished(jedisCluster); + verify(state.dispatchedOperations, times(1)).remove(jedis, opName); + verifyNoMoreInteractions(state.dispatchedOperations); + verify(state.operationQueue, times(1)) + .push( + jedis, + queueEntry.getPlatform().getPropertiesList(), + JsonFormat.printer().print(queueEntry), + queueEntry.getExecuteEntry().getExecutionPolicy().getPriority()); + verifyNoMoreInteractions(state.operationQueue); + OperationChange opChange = verifyChangePublished(backplane.operationChannel(opName), jedis); + assertThat(opChange.hasReset()).isTrue(); + assertThat(opChange.getReset().getOperation().getName()).isEqualTo(opName); } @Test @@ -186,28 +201,30 @@ public void dispatchedOperationsShowProperRequeueAmount0to1() int REQUEUE_AMOUNT_WHEN_READY_TO_REQUEUE = 1; // create a backplane - JedisCluster jedisCluster = mock(JedisCluster.class); - when(mockJedisClusterFactory.get()).thenReturn(jedisCluster); - backplane = - new RedisShardBackplane( - "requeue-operation-test", (o) -> o, (o) -> o, mockJedisClusterFactory); - backplane.start("startTime/test:0000"); + UnifiedJedis jedis = mock(UnifiedJedis.class); + RedisClient client = new RedisClient(jedis); + DistributedState state = new DistributedState(); + state.dispatchedOperations = mock(RedisHashMap.class); + state.dispatchingOperations = mock(RedisMap.class); + state.operationQueue = mock(OperationQueue.class); + RedisShardBackplane backplane = createBackplane("requeue-operation-test"); + backplane.start(client, state, "startTime/test:0000"); // ARRANGE // Assume the operation queue is already populated with a first-time operation. // this means the operation's requeue amount will be 0. // The jedis cluster is also mocked to assume success on other operations.
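// Illustration only, not part of this change: DistributedState here stands in for the
// raw jedis handle. Judging from the verifies in this test, dispatchOperation is assumed
// to touch the mocked handles in roughly this order (dispatchedJson is a placeholder
// name for the DispatchedOperation JSON the backplane serializes):
//
//   String json = state.operationQueue.dequeue(jedis, properties); // pop a queued entry
//   state.dispatchedOperations.insertIfMissing(jedis, opName, dispatchedJson); // claim it
//   state.operationQueue.removeFromDequeue(jedis, json); // acknowledge the provisional pop
//   state.dispatchingOperations.remove(jedis, opName); // clear the in-flight marker
//
// The verify(...) and verifyNoMoreInteractions(...) calls below pin down exactly that
// sequence and nothing more.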
+ final String opName = "op"; QueueEntry queueEntry = QueueEntry.newBuilder() - .setExecuteEntry(ExecuteEntry.newBuilder().setOperationName("op").build()) + .setExecuteEntry(ExecuteEntry.newBuilder().setOperationName(opName).build()) .setRequeueAttempts(STARTING_REQUEUE_AMOUNT) .build(); String queueEntryJson = JsonFormat.printer().print(queueEntry); - when(jedisCluster.brpoplpush(any(String.class), any(String.class), any(Integer.class))) - .thenReturn(queueEntryJson); - + when(state.operationQueue.dequeue(eq(jedis), any(List.class))).thenReturn(queueEntryJson); + when(state.operationQueue.removeFromDequeue(jedis, queueEntryJson)).thenReturn(true); // PRE-ASSERT - when(jedisCluster.hsetnx(any(String.class), any(String.class), any(String.class))) + when(state.dispatchedOperations.insertIfMissing(eq(jedis), eq(opName), any(String.class))) .thenAnswer( args -> { // Extract the operation that was dispatched @@ -220,17 +237,26 @@ public void dispatchedOperationsShowProperRequeueAmount0to1() assertThat(dispatchedOperation.getQueueEntry().getRequeueAttempts()) .isEqualTo(REQUEUE_AMOUNT_WHEN_DISPATCHED); - return 1L; + return true; }); // ACT // dispatch the operation and test properties of the QueueEntry and internal jedis calls. - List properties = new ArrayList<>(); - QueueEntry readyForRequeue = backplane.dispatchOperation(properties); + QueueEntry readyForRequeue = backplane.dispatchOperation(ImmutableList.of()); // ASSERT assertThat(readyForRequeue.getRequeueAttempts()) .isEqualTo(REQUEUE_AMOUNT_WHEN_READY_TO_REQUEUE); + verify(state.operationQueue, times(1)).dequeue(eq(jedis), any(List.class)); + verify(state.operationQueue, times(1)).removeFromDequeue(jedis, queueEntryJson); + verifyNoMoreInteractions(state.operationQueue); + verify(state.dispatchedOperations, times(1)) + .insertIfMissing( + eq(jedis), eq(queueEntry.getExecuteEntry().getOperationName()), any(String.class)); + verifyNoMoreInteractions(state.dispatchedOperations); + verify(state.dispatchingOperations, times(1)) + .remove(jedis, queueEntry.getExecuteEntry().getOperationName()); + verifyNoMoreInteractions(state.dispatchingOperations); } @Test @@ -242,27 +268,30 @@ public void dispatchedOperationsShowProperRequeueAmount1to2() int REQUEUE_AMOUNT_WHEN_READY_TO_REQUEUE = 2; // create a backplane - JedisCluster jedisCluster = mock(JedisCluster.class); - when(mockJedisClusterFactory.get()).thenReturn(jedisCluster); - backplane = - new RedisShardBackplane( - "requeue-operation-test", (o) -> o, (o) -> o, mockJedisClusterFactory); - backplane.start("startTime/test:0000"); + UnifiedJedis jedis = mock(UnifiedJedis.class); + RedisClient client = new RedisClient(jedis); + DistributedState state = new DistributedState(); + state.dispatchedOperations = mock(RedisHashMap.class); + state.dispatchingOperations = mock(RedisMap.class); + state.operationQueue = mock(OperationQueue.class); + RedisShardBackplane backplane = createBackplane("requeue-operation-test"); + backplane.start(client, state, "startTime/test:0000"); + // ARRANGE // Assume the operation queue is already populated from a first re-queue. // this means the operation's requeue amount will be 1. // The jedis cluster is also mocked to assume success on other operations.
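// Illustration only, not part of this change: the PRE-ASSERT below installs a Mockito
// Answer so the test can decode the JSON handed to insertIfMissing and check the requeue
// counter before simulating a successful insert. A minimal sketch of that pattern,
// assuming the third argument is the serialized DispatchedOperation, could read:
//
//   when(state.dispatchedOperations.insertIfMissing(eq(jedis), eq(opName), any(String.class)))
//       .thenAnswer(
//           args -> {
//             String json = args.getArgument(2); // the serialized DispatchedOperation
//             DispatchedOperation.Builder op = DispatchedOperation.newBuilder();
//             JsonFormat.parser().merge(json, op); // parse what would have been stored
//             assertThat(op.getQueueEntry().getRequeueAttempts())
//                 .isEqualTo(REQUEUE_AMOUNT_WHEN_DISPATCHED);
//             return true; // report a fresh hash entry, as a successful insert would
//           });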
+ final String opName = "op"; QueueEntry queueEntry = QueueEntry.newBuilder() - .setExecuteEntry(ExecuteEntry.newBuilder().setOperationName("op").build()) + .setExecuteEntry(ExecuteEntry.newBuilder().setOperationName(opName).build()) .setRequeueAttempts(STARTING_REQUEUE_AMOUNT) .build(); String queueEntryJson = JsonFormat.printer().print(queueEntry); - when(jedisCluster.brpoplpush(any(String.class), any(String.class), any(Integer.class))) - .thenReturn(queueEntryJson); - + when(state.operationQueue.dequeue(eq(jedis), any(List.class))).thenReturn(queueEntryJson); + when(state.operationQueue.removeFromDequeue(jedis, queueEntryJson)).thenReturn(true); // PRE-ASSERT - when(jedisCluster.hsetnx(any(String.class), any(String.class), any(String.class))) + when(state.dispatchedOperations.insertIfMissing(eq(jedis), eq(opName), any(String.class))) .thenAnswer( args -> { // Extract the operation that was dispatched @@ -275,26 +304,33 @@ public void dispatchedOperationsShowProperRequeueAmount1to2() assertThat(dispatchedOperation.getQueueEntry().getRequeueAttempts()) .isEqualTo(REQUEUE_AMOUNT_WHEN_DISPATCHED); - return 1L; + return true; }); // ACT // dispatch the operation and test properties of the QueueEntry and internal jedis calls. - List properties = new ArrayList<>(); - QueueEntry readyForRequeue = backplane.dispatchOperation(properties); + QueueEntry readyForRequeue = backplane.dispatchOperation(ImmutableList.of()); // ASSERT assertThat(readyForRequeue.getRequeueAttempts()) .isEqualTo(REQUEUE_AMOUNT_WHEN_READY_TO_REQUEUE); + verify(state.operationQueue, times(1)).dequeue(eq(jedis), any(List.class)); + verify(state.operationQueue, times(1)).removeFromDequeue(jedis, queueEntryJson); + verifyNoMoreInteractions(state.operationQueue); + verify(state.dispatchedOperations, times(1)) + .insertIfMissing( + eq(jedis), eq(queueEntry.getExecuteEntry().getOperationName()), any(String.class)); + verifyNoMoreInteractions(state.dispatchedOperations); + verify(state.dispatchingOperations, times(1)) + .remove(jedis, queueEntry.getExecuteEntry().getOperationName()); + verifyNoMoreInteractions(state.dispatchingOperations); } @Test public void completeOperationUndispatches() throws IOException { - JedisCluster jedisCluster = mock(JedisCluster.class); - when(mockJedisClusterFactory.get()).thenReturn(jedisCluster); - backplane = - new RedisShardBackplane( - "complete-operation-test", (o) -> o, (o) -> o, mockJedisClusterFactory); + UnifiedJedis jedis = mock(UnifiedJedis.class); + when(mockJedisClusterFactory.get()).thenReturn(jedis); + RedisShardBackplane backplane = createBackplane("complete-operation-test"); backplane.start("startTime/test:0000"); final String opName = "op"; @@ -302,18 +338,15 @@ public void completeOperationUndispatches() throws IOException { backplane.completeOperation(opName); verify(mockJedisClusterFactory, times(1)).get(); - verify(jedisCluster, times(1)) - .hdel(configs.getBackplane().getDispatchedOperationsHashName(), opName); + verify(jedis, times(1)).hdel(configs.getBackplane().getDispatchedOperationsHashName(), opName); } @Test @Ignore public void deleteOperationDeletesAndPublishes() throws IOException { - JedisCluster jedisCluster = mock(JedisCluster.class); - when(mockJedisClusterFactory.get()).thenReturn(jedisCluster); - backplane = - new RedisShardBackplane( - "delete-operation-test", (o) -> o, (o) -> o, mockJedisClusterFactory); + UnifiedJedis jedis = mock(UnifiedJedis.class); + when(mockJedisClusterFactory.get()).thenReturn(jedis); + RedisShardBackplane backplane = 
createBackplane("delete-operation-test"); backplane.start("startTime/test:0000"); final String opName = "op"; @@ -321,23 +354,22 @@ public void deleteOperationDeletesAndPublishes() throws IOException { backplane.deleteOperation(opName); verify(mockJedisClusterFactory, times(1)).get(); - verify(jedisCluster, times(1)) - .hdel(configs.getBackplane().getDispatchedOperationsHashName(), opName); - verify(jedisCluster, times(1)).del(operationName(opName)); - verifyChangePublished(jedisCluster); + verify(jedis, times(1)).hdel(configs.getBackplane().getDispatchedOperationsHashName(), opName); + verify(jedis, times(1)).del(operationName(opName)); + OperationChange opChange = verifyChangePublished(backplane.operationChannel(opName), jedis); + assertThat(opChange.hasReset()).isTrue(); + assertThat(opChange.getReset().getOperation().getName()).isEqualTo(opName); } @Test public void invocationsCanBeBlacklisted() throws IOException { UUID toolInvocationId = UUID.randomUUID(); - JedisCluster jedisCluster = mock(JedisCluster.class); + UnifiedJedis jedis = mock(UnifiedJedis.class); String invocationBlacklistKey = configs.getBackplane().getInvocationBlacklistPrefix() + ":" + toolInvocationId; - when(jedisCluster.exists(invocationBlacklistKey)).thenReturn(true); - when(mockJedisClusterFactory.get()).thenReturn(jedisCluster); - backplane = - new RedisShardBackplane( - "invocation-blacklist-test", o -> o, o -> o, mockJedisClusterFactory); + when(jedis.exists(invocationBlacklistKey)).thenReturn(true); + when(mockJedisClusterFactory.get()).thenReturn(jedis); + RedisShardBackplane backplane = createBackplane("invocation-blacklist-test"); backplane.start("startTime/test:0000"); assertThat( @@ -348,6 +380,76 @@ public void invocationsCanBeBlacklisted() throws IOException { .isTrue(); verify(mockJedisClusterFactory, times(1)).get(); - verify(jedisCluster, times(1)).exists(invocationBlacklistKey); + verify(jedis, times(1)).exists(invocationBlacklistKey); + } + + @Test + public void testGetWorkersStartTime() throws IOException { + UnifiedJedis jedis = mock(UnifiedJedis.class); + when(mockJedisClusterFactory.get()).thenReturn(jedis); + RedisShardBackplane backplane = createBackplane("workers-starttime-test"); + backplane.start("startTime/test:0000"); + + Set workerNames = ImmutableSet.of("worker1", "worker2", "missing_worker"); + + String storageWorkerKey = configs.getBackplane().getWorkersHashName() + "_storage"; + Map workersJson = + Map.of( + "worker1", + "{\"endpoint\": \"worker1\", \"expireAt\": \"9999999999999\", \"workerType\": 3," + + " \"firstRegisteredAt\": \"1685292624000\"}", + "worker2", + "{\"endpoint\": \"worker2\", \"expireAt\": \"9999999999999\", \"workerType\": 3," + + " \"firstRegisteredAt\": \"1685282624000\"}"); + when(jedis.hgetAll(storageWorkerKey)).thenReturn(workersJson); + Map workersStartTime = backplane.getWorkersStartTimeInEpochSecs(workerNames); + assertThat(workersStartTime.size()).isEqualTo(2); + assertThat(workersStartTime.get("worker1")).isEqualTo(1685292624L); + assertThat(workersStartTime.get("worker2")).isEqualTo(1685282624L); + assertThat(workersStartTime.get("missing_worker")).isNull(); + } + + @Test + public void getDigestInsertTime() throws IOException { + UnifiedJedis jedis = mock(UnifiedJedis.class); + when(mockJedisClusterFactory.get()).thenReturn(jedis); + RedisShardBackplane backplane = createBackplane("digest-inserttime-test"); + backplane.start("startTime/test:0000"); + long ttl = 3600L; + long expirationInSecs = configs.getBackplane().getCasExpire(); + 
when(jedis.ttl("ContentAddressableStorage:abc/0")).thenReturn(ttl); + + Digest digest = Digest.newBuilder().setHash("abc").build(); + + Long insertTimeInSecs = backplane.getDigestInsertTime(digest); + + // Assuming there could be at most 2s delay in execution of both + // `Instant.now().getEpochSecond()` calls. + assertThat(insertTimeInSecs) + .isGreaterThan(Instant.now().getEpochSecond() - expirationInSecs + ttl - 2); + assertThat(insertTimeInSecs).isAtMost(Instant.now().getEpochSecond() - expirationInSecs + ttl); + } + + @Test + public void testAddWorker() throws IOException { + ShardWorker shardWorker = + ShardWorker.newBuilder().setWorkerType(3).setFirstRegisteredAt(1703065913000L).build(); + UnifiedJedis jedis = mock(UnifiedJedis.class); + when(mockJedisClusterFactory.get()).thenReturn(jedis); + when(jedis.hset(anyString(), anyString(), anyString())).thenReturn(1L); + RedisShardBackplane backplane = createBackplane("add-worker-test"); + backplane.start("addWorker/test:0000"); + backplane.addWorker(shardWorker); + verify(jedis, times(1)) + .hset( + configs.getBackplane().getWorkersHashName() + "_storage", + "", + JsonFormat.printer().print(shardWorker)); + verify(jedis, times(1)) + .hset( + configs.getBackplane().getWorkersHashName() + "_execute", + "", + JsonFormat.printer().print(shardWorker)); + verify(jedis, times(1)).publish(anyString(), anyString()); } } diff --git a/src/test/java/build/buildfarm/instance/shard/RedisShardSubscriberTest.java b/src/test/java/build/buildfarm/instance/shard/RedisShardSubscriberTest.java index 671181cbca..8169b01c69 100644 --- a/src/test/java/build/buildfarm/instance/shard/RedisShardSubscriberTest.java +++ b/src/test/java/build/buildfarm/instance/shard/RedisShardSubscriberTest.java @@ -24,8 +24,9 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import static redis.clients.jedis.Protocol.Keyword.SUBSCRIBE; -import static redis.clients.jedis.Protocol.Keyword.UNSUBSCRIBE; +import static redis.clients.jedis.Protocol.Command.SUBSCRIBE; +import static redis.clients.jedis.Protocol.Command.UNSUBSCRIBE; +import static redis.clients.jedis.Protocol.ResponseKeyword; import build.buildfarm.instance.shard.RedisShardSubscriber.TimedWatchFuture; import build.buildfarm.v1test.OperationChange; @@ -35,9 +36,11 @@ import com.google.common.collect.MultimapBuilder; import com.google.common.collect.Multimaps; import com.google.common.collect.Sets; +import com.google.common.truth.Correspondence; import com.google.longrunning.Operation; import com.google.protobuf.InvalidProtocolBufferException; import java.time.Instant; +import java.util.Arrays; import java.util.List; import java.util.Set; import java.util.concurrent.BlockingQueue; @@ -47,7 +50,11 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; -import redis.clients.jedis.Client; +import redis.clients.jedis.CommandArguments; +import redis.clients.jedis.Connection; +import redis.clients.jedis.args.Rawable; +import redis.clients.jedis.args.RawableFactory; +import redis.clients.jedis.commands.ProtocolCommand; @RunWith(JUnit4.class) public class RedisShardSubscriberTest { @@ -63,17 +70,23 @@ public void unwatch() { } } - private static class TestClient extends Client { - private final Set subscriptions = Sets.newConcurrentHashSet(); - private final BlockingQueue> replyQueue = new LinkedBlockingQueue<>(); - private final BlockingQueue> pendingReplies = new LinkedBlockingQueue<>(); + private static class 
TestConnection extends Connection { + private final Set subscriptions = Sets.newConcurrentHashSet(); + private final BlockingQueue pendingRequests = new LinkedBlockingQueue<>(); + private final BlockingQueue replyQueue = new LinkedBlockingQueue<>(); + private final BlockingQueue pendingReplies = new LinkedBlockingQueue<>(); - Set getSubscriptions() { + Set getSubscriptions() { return subscriptions; } @Override public List getUnflushedObjectMultiBulkReply() { + throw new UnsupportedOperationException("getUnflushedObjectMultiBulkReply is deprecated"); + } + + @Override + public Object getUnflushedObject() { try { return replyQueue.take(); } catch (InterruptedException e) { @@ -83,53 +96,104 @@ public List getUnflushedObjectMultiBulkReply() { } @Override - public void subscribe(String... channels) { - for (String channel : channels) { - if (subscriptions.add(channel)) { - pendingReplies.add( - ImmutableList.of(SUBSCRIBE.raw, channel.getBytes(), (long) subscriptions.size())); + public void sendCommand(final CommandArguments cargs) { + ProtocolCommand command = cargs.getCommand(); + if (command == SUBSCRIBE) { + pendingRequests.add(() -> subscribe(cargs)); + } else if (command == UNSUBSCRIBE) { + if (cargs.size() == 1) { + // only includes command + pendingRequests.add(() -> unsubscribe()); } else { - throw new IllegalStateException("subscribe to already subscribed channel: " + channel); + pendingRequests.add(() -> unsubscribe(cargs)); } + } else { + throw new UnsupportedOperationException(cargs.toString()); } } @Override - public void unsubscribe() { + public void setTimeoutInfinite() { + // ignore + } + + private void subscribe(Iterable channels) { + boolean isCommand = true; + for (Rawable channel : channels) { + if (isCommand) { + isCommand = false; + } else { + if (subscriptions.add(channel)) { + pendingReplies.add( + ImmutableList.of( + ResponseKeyword.SUBSCRIBE.getRaw(), + channel.getRaw(), + (long) subscriptions.size())); + } else { + throw new IllegalStateException("subscribe to already subscribed channel: " + channel); + } + } + } + } + + private void unsubscribe() { long counter = subscriptions.size(); - for (String channel : subscriptions) { - pendingReplies.add(ImmutableList.of(UNSUBSCRIBE.raw, channel.getBytes(), --counter)); + for (Rawable channel : subscriptions) { + pendingReplies.add( + ImmutableList.of(ResponseKeyword.UNSUBSCRIBE.getRaw(), channel.getRaw(), --counter)); } subscriptions.clear(); } - @Override - public void unsubscribe(String... 
channels) { - for (String channel : channels) { - if (subscriptions.remove(channel)) { - pendingReplies.add( - ImmutableList.of(UNSUBSCRIBE.raw, channel.getBytes(), (long) subscriptions.size())); + private void unsubscribe(Iterable channels) { + boolean isCommand = true; + for (Rawable channel : channels) { + if (isCommand) { + isCommand = false; } else { - throw new IllegalStateException("unsubscribe from unknown channel: " + channel); + if (subscriptions.remove(channel)) { + pendingReplies.add( + ImmutableList.of( + ResponseKeyword.UNSUBSCRIBE.getRaw(), + channel.getRaw(), + (long) subscriptions.size())); + } else { + throw new IllegalStateException("unsubscribe from unknown channel: " + channel); + } } } } @Override public void flush() { + for (Runnable request = pendingRequests.poll(); + request != null; + request = pendingRequests.poll()) { + request.run(); + } pendingReplies.drainTo(replyQueue); } } RedisShardSubscriber createSubscriber( ListMultimap watchers, Executor executor) { - return new RedisShardSubscriber(watchers, /* workers=*/ null, "worker-channel", executor); + return new RedisShardSubscriber(watchers, /* workers= */ null, "worker-channel", executor); } RedisShardSubscriber createSubscriber(ListMultimap watchers) { - return createSubscriber(watchers, /* executor=*/ null); + return createSubscriber(watchers, /* executor= */ null); } + private static final Correspondence rawableCorrespondence = + Correspondence.from( + new Correspondence.BinaryPredicate() { + @Override + public boolean apply(Rawable a, Rawable e) { + return Arrays.equals(a.getRaw(), e.getRaw()); + } + }, + "is rawably equivalent to"); + @Test public void novelChannelWatcherSubscribes() throws InterruptedException { ListMultimap watchers = @@ -137,8 +201,8 @@ public void novelChannelWatcherSubscribes() throws InterruptedException { MultimapBuilder.linkedHashKeys().arrayListValues().build()); RedisShardSubscriber operationSubscriber = createSubscriber(watchers, directExecutor()); - TestClient testClient = new TestClient(); - Thread proceedThread = new Thread(() -> operationSubscriber.proceed(testClient)); + TestConnection testConnection = new TestConnection(); + Thread proceedThread = new Thread(() -> operationSubscriber.start(testConnection)); proceedThread.start(); while (!operationSubscriber.isSubscribed()) { MICROSECONDS.sleep(10); @@ -151,7 +215,9 @@ public void novelChannelWatcherSubscribes() throws InterruptedException { .isEqualTo(novelWatcher); String[] channels = new String[1]; channels[0] = novelChannel; - assertThat(testClient.getSubscriptions()).contains(novelChannel); + assertThat(testConnection.getSubscriptions()) + .comparingElementsUsing(rawableCorrespondence) + .contains(RawableFactory.from(novelChannel)); operationSubscriber.unsubscribe(); proceedThread.join(); } @@ -228,8 +294,8 @@ public void doneResetOperationIsObservedAndUnsubscribed() MultimapBuilder.linkedHashKeys().arrayListValues().build()); RedisShardSubscriber operationSubscriber = createSubscriber(watchers, directExecutor()); - TestClient testClient = new TestClient(); - Thread proceedThread = new Thread(() -> operationSubscriber.proceed(testClient)); + TestConnection testConnection = new TestConnection(); + Thread proceedThread = new Thread(() -> operationSubscriber.start(testConnection)); proceedThread.start(); while (!operationSubscriber.isSubscribed()) { MICROSECONDS.sleep(10); @@ -257,7 +323,7 @@ public void observe(Operation operation) { .build()) .build())); assertThat(observed.get()).isTrue(); - 
assertThat(testClient.getSubscriptions()).doesNotContain(doneMessageChannel); + assertThat(testConnection.getSubscriptions()).doesNotContain(doneMessageChannel); operationSubscriber.unsubscribe(); proceedThread.join(); } diff --git a/src/test/java/build/buildfarm/instance/shard/ShardInstanceTest.java b/src/test/java/build/buildfarm/instance/shard/ServerInstanceTest.java similarity index 89% rename from src/test/java/build/buildfarm/instance/shard/ShardInstanceTest.java rename to src/test/java/build/buildfarm/instance/shard/ServerInstanceTest.java index a74e7d6f6f..5ef0db01ec 100644 --- a/src/test/java/build/buildfarm/instance/shard/ShardInstanceTest.java +++ b/src/test/java/build/buildfarm/instance/shard/ServerInstanceTest.java @@ -20,9 +20,9 @@ import static build.buildfarm.common.Actions.invalidActionVerboseMessage; import static build.buildfarm.common.Errors.VIOLATION_TYPE_INVALID; import static build.buildfarm.common.Errors.VIOLATION_TYPE_MISSING; -import static build.buildfarm.instance.server.AbstractServerInstance.INVALID_PLATFORM; -import static build.buildfarm.instance.server.AbstractServerInstance.MISSING_ACTION; -import static build.buildfarm.instance.server.AbstractServerInstance.MISSING_COMMAND; +import static build.buildfarm.instance.server.NodeInstance.INVALID_PLATFORM; +import static build.buildfarm.instance.server.NodeInstance.MISSING_ACTION; +import static build.buildfarm.instance.server.NodeInstance.MISSING_COMMAND; import static com.google.common.base.Predicates.notNull; import static com.google.common.truth.Truth.assertThat; import static com.google.common.util.concurrent.Futures.immediateFuture; @@ -31,9 +31,14 @@ import static java.util.concurrent.Executors.newSingleThreadExecutor; import static java.util.concurrent.TimeUnit.SECONDS; import static org.mockito.AdditionalAnswers.answer; +import static org.mockito.ArgumentMatchers.anyIterable; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.Mockito.any; +import static org.mockito.Mockito.atLeast; import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.atMost; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; @@ -80,6 +85,8 @@ import com.google.common.collect.ImmutableSet; import com.google.common.collect.Iterables; import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.longrunning.Operation; import com.google.protobuf.Any; @@ -95,6 +102,7 @@ import io.grpc.stub.ServerCallStreamObserver; import io.grpc.stub.StreamObserver; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; @@ -115,20 +123,19 @@ import org.junit.runners.JUnit4; import org.mockito.ArgumentCaptor; import org.mockito.ArgumentMatcher; -import org.mockito.Matchers; import org.mockito.Mock; import org.mockito.MockitoAnnotations; import org.mockito.stubbing.Answer; @RunWith(JUnit4.class) -public class ShardInstanceTest { +public class ServerInstanceTest { private static final DigestUtil DIGEST_UTIL = new DigestUtil(HashFunction.SHA256); private static final long QUEUE_TEST_TIMEOUT_SECONDS = 3; private static final Duration DEFAULT_TIMEOUT = Durations.fromSeconds(60); private static final Command 
SIMPLE_COMMAND = Command.newBuilder().addAllArguments(ImmutableList.of("true")).build(); - private ShardInstance instance; + private ServerInstance instance; private Map blobDigests; @Mock private Backplane mockBackplane; @@ -145,22 +152,22 @@ public void setUp() throws InterruptedException { blobDigests = Maps.newHashMap(); ActionCache actionCache = new ShardActionCache(10, mockBackplane, newDirectExecutorService()); instance = - new ShardInstance( + new ServerInstance( "shard", DIGEST_UTIL, mockBackplane, actionCache, - /* runDispatchedMonitor=*/ false, - /* dispatchedMonitorIntervalSeconds=*/ 0, - /* runOperationQueuer=*/ false, - /* maxBlobSize=*/ 0, - /* maxCpu=*/ 1, - /* maxRequeueAttempts=*/ 1, - /* maxActionTimeout=*/ Duration.getDefaultInstance(), - /* useDenyList=*/ true, + /* runDispatchedMonitor= */ false, + /* dispatchedMonitorIntervalSeconds= */ 0, + /* runOperationQueuer= */ false, + /* maxBlobSize= */ 0, + /* maxCpu= */ 1, + /* maxRequeueAttempts= */ 1, + /* maxActionTimeout= */ Duration.getDefaultInstance(), + /* useDenyList= */ true, mockOnStop, CacheBuilder.newBuilder().build(mockInstanceLoader), - /* actionCacheFetchService=*/ listeningDecorator(newSingleThreadExecutor()), + /* actionCacheFetchService= */ listeningDecorator(newSingleThreadExecutor()), false); instance.start("startTime/test:0000"); } @@ -171,11 +178,11 @@ public void tearDown() throws InterruptedException { } private Action createAction() throws Exception { - return createAction(/* provideAction=*/ true, /* provideCommand=*/ true, SIMPLE_COMMAND); + return createAction(/* provideAction= */ true, /* provideCommand= */ true, SIMPLE_COMMAND); } private Action createAction(boolean provideAction) throws Exception { - return createAction(provideAction, /* provideCommand=*/ true, SIMPLE_COMMAND); + return createAction(provideAction, /* provideCommand= */ true, SIMPLE_COMMAND); } private Action createAction(boolean provideAction, boolean provideCommand) throws Exception { @@ -270,11 +277,11 @@ public void executeCallsPrequeueWithAction() throws IOException { Watcher mockWatcher = mock(Watcher.class); instance.execute( actionDigest, - /* skipCacheLookup=*/ false, + /* skipCacheLookup= */ false, ExecutionPolicy.getDefaultInstance(), ResultsCachePolicy.getDefaultInstance(), RequestMetadata.getDefaultInstance(), - /* watcher=*/ mockWatcher); + /* watcher= */ mockWatcher); verify(mockWatcher, times(1)).observe(any(Operation.class)); ArgumentCaptor executeEntryCaptor = ArgumentCaptor.forClass(ExecuteEntry.class); verify(mockBackplane, times(1)).prequeue(executeEntryCaptor.capture(), any(Operation.class)); @@ -349,7 +356,7 @@ public void queueActionFailsQueueEligibility() throws Exception { .setSkipCacheLookup(true) .build(); - when(mockBackplane.propertiesEligibleForQueue(Matchers.anyList())).thenReturn(false); + when(mockBackplane.propertiesEligibleForQueue(anyList())).thenReturn(false); when(mockBackplane.canQueue()).thenReturn(true); @@ -377,7 +384,10 @@ public void queueActionFailsQueueEligibility() throws Exception { .setType(VIOLATION_TYPE_INVALID) .setSubject(INVALID_PLATFORM) .setDescription( - "properties are not valid for queue eligibility: []. If you think your queue should still accept these poperties without them being specified in queue configuration, consider configuring the queue with `allow_unmatched: True`")) + "properties are not valid for queue eligibility: []. 
If you think your" + + " queue should still accept these poperties without them being" + + " specified in queue configuration, consider configuring the queue" + + " with `allow_unmatched: True`")) .build(); ExecuteResponse executeResponse = ExecuteResponse.newBuilder() @@ -476,7 +486,7 @@ public void queueDirectoryMissingErrorsOperation() throws Exception { .setSkipCacheLookup(true) .build(); - when(mockBackplane.propertiesEligibleForQueue(Matchers.anyList())).thenReturn(true); + when(mockBackplane.propertiesEligibleForQueue(anyList())).thenReturn(true); when(mockBackplane.canQueue()).thenReturn(true); @@ -550,7 +560,7 @@ public void queueOperationPutFailureCancelsOperation() throws Exception { .setSkipCacheLookup(true) .build(); - when(mockBackplane.propertiesEligibleForQueue(Matchers.anyList())).thenReturn(true); + when(mockBackplane.propertiesEligibleForQueue(anyList())).thenReturn(true); when(mockBackplane.canQueue()).thenReturn(true); @@ -616,7 +626,7 @@ public void queueWithFailedCacheCheckContinues() throws Exception { .setActionDigest(actionKey.getDigest()) .build(); - when(mockBackplane.propertiesEligibleForQueue(Matchers.anyList())).thenReturn(true); + when(mockBackplane.propertiesEligibleForQueue(anyList())).thenReturn(true); when(mockBackplane.canQueue()).thenReturn(true); @@ -710,11 +720,11 @@ public void duplicateExecutionsServedFromCacheAreForcedToSkipLookup() throws Exc Watcher mockWatcher = mock(Watcher.class); instance.execute( actionDigest, - /* skipCacheLookup=*/ false, + /* skipCacheLookup= */ false, ExecutionPolicy.getDefaultInstance(), ResultsCachePolicy.getDefaultInstance(), requestMetadata, - /* watcher=*/ mockWatcher); + /* watcher= */ mockWatcher); verify(mockWatcher, times(1)).observe(any(Operation.class)); ArgumentCaptor executeEntryCaptor = ArgumentCaptor.forClass(ExecuteEntry.class); verify(mockBackplane, times(1)).prequeue(executeEntryCaptor.capture(), any(Operation.class)); @@ -729,7 +739,7 @@ public void requeueFailsOnMissingDirectory() throws Exception { Digest missingDirectoryDigest = Digest.newBuilder().setHash("missing-directory").setSizeBytes(1).build(); - when(mockBackplane.propertiesEligibleForQueue(Matchers.anyList())).thenReturn(true); + when(mockBackplane.propertiesEligibleForQueue(anyList())).thenReturn(true); when(mockBackplane.getOperation(eq(operationName))) .thenReturn( @@ -1054,27 +1064,31 @@ public void containsBlobReflectsWorkerWithUnknownSize() throws Exception { @Test public void findMissingBlobsTest_ViaBackPlane() throws Exception { - - Set activeWorkers = new HashSet<>(Arrays.asList("worker1", "worker2", "worker3")); - Set expiredWorker = new HashSet<>(Arrays.asList("workerX", "workerY", "workerZ")); + Set activeWorkers = ImmutableSet.of("worker1", "worker2", "worker3"); + Set expiredWorkers = ImmutableSet.of("workerX", "workerY", "workerZ"); + Set imposterWorkers = ImmutableSet.of("imposter1", "imposter2", "imposter3"); Set availableDigests = - new HashSet<>( - Arrays.asList( - Digest.newBuilder().setHash("toBeFound1").setSizeBytes(1).build(), - Digest.newBuilder().setHash("toBeFound2").setSizeBytes(1).build(), - Digest.newBuilder().setHash("toBeFound3").setSizeBytes(1).build(), - // a copy is added in final digest list - Digest.newBuilder().setHash("toBeFoundDuplicate").setSizeBytes(1).build())); + ImmutableSet.of( + Digest.newBuilder().setHash("toBeFound1").setSizeBytes(1).build(), + Digest.newBuilder().setHash("toBeFound2").setSizeBytes(1).build(), + Digest.newBuilder().setHash("toBeFound3").setSizeBytes(1).build(), + // a copy is 
added in final digest list + Digest.newBuilder().setHash("toBeFoundDuplicate").setSizeBytes(1).build()); Set missingDigests = - new HashSet<>( - Arrays.asList( - Digest.newBuilder().setHash("missing1").setSizeBytes(1).build(), - Digest.newBuilder().setHash("missing2").setSizeBytes(1).build(), - Digest.newBuilder().setHash("missing3").setSizeBytes(1).build(), - // a copy is added in final digest list - Digest.newBuilder().setHash("missingDuplicate").setSizeBytes(1).build())); + ImmutableSet.of( + Digest.newBuilder().setHash("missing1").setSizeBytes(1).build(), + Digest.newBuilder().setHash("missing2").setSizeBytes(1).build(), + Digest.newBuilder().setHash("missing3").setSizeBytes(1).build(), + // a copy is added in final digest list + Digest.newBuilder().setHash("missingDuplicate").setSizeBytes(1).build()); + + Set digestAvailableOnImposters = + ImmutableSet.of( + Digest.newBuilder().setHash("toBeFoundOnImposter1").setSizeBytes(1).build(), + Digest.newBuilder().setHash("toBeFoundOnImposter2").setSizeBytes(1).build(), + Digest.newBuilder().setHash("toBeFoundOnImposter3").setSizeBytes(1).build()); Set emptyDigests = new HashSet<>( @@ -1087,6 +1101,7 @@ public void findMissingBlobsTest_ViaBackPlane() throws Exception { availableDigests, missingDigests, emptyDigests, + digestAvailableOnImposters, Arrays.asList( Digest.newBuilder().setHash("toBeFoundDuplicate").setSizeBytes(1).build(), Digest.newBuilder().setHash("missingDuplicate").setSizeBytes(1).build())); @@ -1097,24 +1112,53 @@ public void findMissingBlobsTest_ViaBackPlane() throws Exception { digestAndWorkersMap.put(digest, getRandomSubset(activeWorkers)); } for (Digest digest : missingDigests) { - digestAndWorkersMap.put(digest, getRandomSubset(expiredWorker)); + digestAndWorkersMap.put(digest, getRandomSubset(expiredWorkers)); + } + for (Digest digest : digestAvailableOnImposters) { + digestAndWorkersMap.put(digest, getRandomSubset(imposterWorkers)); } BuildfarmConfigs buildfarmConfigs = instance.getBuildFarmConfigs(); buildfarmConfigs.getServer().setFindMissingBlobsViaBackplane(true); - when(mockBackplane.getStorageWorkers()).thenReturn(activeWorkers); + Set activeAndImposterWorkers = + Sets.newHashSet(Iterables.concat(activeWorkers, imposterWorkers)); + + when(mockBackplane.getStorageWorkers()).thenReturn(activeAndImposterWorkers); when(mockBackplane.getBlobDigestsWorkers(any(Iterable.class))).thenReturn(digestAndWorkersMap); + when(mockInstanceLoader.load(anyString())).thenReturn(mockWorkerInstance); + when(mockWorkerInstance.findMissingBlobs(anyIterable(), any(RequestMetadata.class))) + .thenReturn(Futures.immediateFuture(new ArrayList<>())); + + long serverStartTime = 1686951033L; // june 15th, 2023 + Map workersStartTime = new HashMap<>(); + for (String worker : activeAndImposterWorkers) { + workersStartTime.put(worker, serverStartTime); + } + when(mockBackplane.getWorkersStartTimeInEpochSecs(activeAndImposterWorkers)) + .thenReturn(workersStartTime); + long oneDay = 86400L; + for (Digest digest : availableDigests) { + when(mockBackplane.getDigestInsertTime(digest)).thenReturn(serverStartTime + oneDay); + } + for (Digest digest : digestAvailableOnImposters) { + when(mockBackplane.getDigestInsertTime(digest)).thenReturn(serverStartTime - oneDay); + } Iterable actualMissingDigests = instance.findMissingBlobs(allDigests, RequestMetadata.getDefaultInstance()).get(); + Iterable expectedMissingDigests = + Iterables.concat(missingDigests, digestAvailableOnImposters); + + 
assertThat(actualMissingDigests).containsExactlyElementsIn(expectedMissingDigests); + verify(mockWorkerInstance, atMost(3)) + .findMissingBlobs(anyIterable(), any(RequestMetadata.class)); + verify(mockWorkerInstance, atLeast(1)) + .findMissingBlobs(anyIterable(), any(RequestMetadata.class)); for (Digest digest : actualMissingDigests) { assertThat(digest).isNotIn(availableDigests); assertThat(digest).isNotIn(emptyDigests); - assertThat(digest).isIn(missingDigests); - } - for (Digest digest : missingDigests) { - assertThat(digest).isIn(actualMissingDigests); + assertThat(digest).isIn(expectedMissingDigests); } // reset BuildfarmConfigs diff --git a/src/test/java/build/buildfarm/instance/shard/UnobservableWatcher.java b/src/test/java/build/buildfarm/instance/shard/UnobservableWatcher.java index a6070f1c2f..993e86343b 100644 --- a/src/test/java/build/buildfarm/instance/shard/UnobservableWatcher.java +++ b/src/test/java/build/buildfarm/instance/shard/UnobservableWatcher.java @@ -19,7 +19,7 @@ class UnobservableWatcher extends TimedWatcher { UnobservableWatcher() { - this(/* expiresAt=*/ Instant.now()); + this(/* expiresAt= */ Instant.now()); } UnobservableWatcher(Instant expiresAt) { diff --git a/src/test/java/build/buildfarm/instance/shard/UtilTest.java b/src/test/java/build/buildfarm/instance/shard/UtilTest.java index 01e8a017fc..c8e7ddbcbb 100644 --- a/src/test/java/build/buildfarm/instance/shard/UtilTest.java +++ b/src/test/java/build/buildfarm/instance/shard/UtilTest.java @@ -25,7 +25,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.verifyNoInteractions; import static org.mockito.Mockito.when; import build.bazel.remote.execution.v2.Digest; @@ -80,7 +80,7 @@ public void correctMissingBlobChecksAllWorkers() throws Exception { correctMissingBlob( backplane, workerSet, - /* originalLocationSet=*/ ImmutableSet.of(), + /* originalLocationSet= */ ImmutableSet.of(), workerInstanceFactory, digest, directExecutor(), @@ -120,7 +120,7 @@ public void correctMissingBlobFailsImmediatelyOnUnretriable() throws Interrupted correctMissingBlob( backplane, workerSet, - /* originalLocationSet=*/ ImmutableSet.of(), + /* originalLocationSet= */ ImmutableSet.of(), workerInstanceFactory, digest, directExecutor(), @@ -135,7 +135,7 @@ public void correctMissingBlobFailsImmediatelyOnUnretriable() throws Interrupted } verify(instance, times(1)).findMissingBlobs(eq(digests), any(RequestMetadata.class)); assertThat(caughtException).isTrue(); - verifyZeroInteractions(backplane); + verifyNoInteractions(backplane); } @Test @@ -170,7 +170,7 @@ public void correctMissingBlobIgnoresUnavailableWorkers() throws Exception { correctMissingBlob( backplane, workerSet, - /* originalLocationSet=*/ ImmutableSet.of(), + /* originalLocationSet= */ ImmutableSet.of(), workerInstanceFactory, digest, directExecutor(), @@ -207,7 +207,7 @@ public void correctMissingBlobRetriesRetriable() throws Exception { correctMissingBlob( backplane, workerSet, - /* originalLocationSet=*/ ImmutableSet.of(), + /* originalLocationSet= */ ImmutableSet.of(), workerInstanceFactory, digest, directExecutor(), @@ -247,7 +247,7 @@ public void correctMissingBlobIgnoresBackplaneException() throws Exception { correctMissingBlob( backplane, workerSet, - /* originalLocationSet=*/ ImmutableSet.of(), + /* originalLocationSet= */ ImmutableSet.of(), workerInstanceFactory, digest, directExecutor(), diff 
--git a/src/test/java/build/buildfarm/instance/stub/BUILD b/src/test/java/build/buildfarm/instance/stub/BUILD index 8f44f01795..9b75da1698 100644 --- a/src/test/java/build/buildfarm/instance/stub/BUILD +++ b/src/test/java/build/buildfarm/instance/stub/BUILD @@ -12,16 +12,18 @@ java_test( "//src/main/java/build/buildfarm/instance", "//src/main/java/build/buildfarm/instance/stub", "//src/test/java/build/buildfarm:test_runner", - "@googleapis//:google_bytestream_bytestream_java_grpc", - "@googleapis//:google_bytestream_bytestream_java_proto", + "//third_party/remote-apis:build_bazel_remote_execution_v2_remote_execution_java_grpc", + "@com_google_googleapis//google/bytestream:bytestream_java_grpc", + "@com_google_googleapis//google/bytestream:bytestream_java_proto", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", "@maven//:com_google_truth_truth", "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_core", + "@maven//:io_grpc_grpc_inprocess", "@maven//:io_grpc_grpc_stub", + "@maven//:io_grpc_grpc_util", "@maven//:org_mockito_mockito_core", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_grpc", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) diff --git a/src/test/java/build/buildfarm/instance/stub/ByteStreamUploaderTest.java b/src/test/java/build/buildfarm/instance/stub/ByteStreamUploaderTest.java index 6d3e25cac7..711374e942 100644 --- a/src/test/java/build/buildfarm/instance/stub/ByteStreamUploaderTest.java +++ b/src/test/java/build/buildfarm/instance/stub/ByteStreamUploaderTest.java @@ -79,10 +79,10 @@ public void onCompleted() {} }); ByteStreamUploader uploader = new ByteStreamUploader( - /* instanceName=*/ null, + /* instanceName= */ null, InProcessChannelBuilder.forName(fakeServerName).directExecutor().build(), - /* callCredentials=*/ null, - /* callTimeoutSecs=*/ 1, + /* callCredentials= */ null, + /* callTimeoutSecs= */ 1, NO_RETRIES); Chunker chunker = Chunker.builder().setInput(ByteString.copyFromUtf8("Hello, World!")).build(); uploader.uploadBlob(HashCode.fromInt(42), chunker); diff --git a/src/test/java/build/buildfarm/instance/stub/StubInstanceTest.java b/src/test/java/build/buildfarm/instance/stub/StubInstanceTest.java index e50e3f0028..1b3ba336ac 100644 --- a/src/test/java/build/buildfarm/instance/stub/StubInstanceTest.java +++ b/src/test/java/build/buildfarm/instance/stub/StubInstanceTest.java @@ -20,7 +20,8 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.verifyNoMoreInteractions; import build.bazel.remote.execution.v2.Action; import build.bazel.remote.execution.v2.ActionCacheGrpc.ActionCacheImplBase; @@ -64,6 +65,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.util.UUID; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; @@ -98,7 +100,7 @@ public void tearDown() throws InterruptedException { fakeServer.awaitTermination(); } - private Instance newStubInstance(String instanceName) { + private StubInstance newStubInstance(String instanceName) { return new StubInstance( instanceName, DIGEST_UTIL, @@ -110,7 +112,7 @@ public void 
reflectsNameAndDigestUtil() { String test1Name = "test1"; ByteString test1Blob = ByteString.copyFromUtf8(test1Name); DigestUtil test1DigestUtil = new DigestUtil(DigestUtil.HashFunction.SHA256); - Instance test1Instance = new StubInstance(test1Name, test1DigestUtil, /* channel=*/ null); + Instance test1Instance = new StubInstance(test1Name, test1DigestUtil, /* channel= */ null); assertThat(test1Instance.getName()).isEqualTo(test1Name); assertThat(test1Instance.getDigestUtil().compute(test1Blob)) .isEqualTo(test1DigestUtil.compute(test1Blob)); @@ -119,7 +121,7 @@ public void reflectsNameAndDigestUtil() { String test2Name = "test2"; ByteString test2Blob = ByteString.copyFromUtf8(test2Name); DigestUtil test2DigestUtil = new DigestUtil(DigestUtil.HashFunction.MD5); - Instance test2Instance = new StubInstance(test2Name, test2DigestUtil, /* channel=*/ null); + Instance test2Instance = new StubInstance(test2Name, test2DigestUtil, /* channel= */ null); assertThat(test2Instance.getName()).isEqualTo(test2Name); assertThat(test2Instance.getDigestUtil().compute(test2Blob)) .isEqualTo(test2DigestUtil.compute(test2Blob)); @@ -197,6 +199,38 @@ public void findMissingBlobs( instance.stop(); } + @Test + public void findMissingBlobsOverSizeLimitRecombines() + throws ExecutionException, InterruptedException { + AtomicReference reference = new AtomicReference<>(); + serviceRegistry.addService( + new ContentAddressableStorageImplBase() { + @Override + public void findMissingBlobs( + FindMissingBlobsRequest request, + StreamObserver responseObserver) { + reference.set(request); + responseObserver.onNext( + FindMissingBlobsResponse.newBuilder() + .addAllMissingBlobDigests(request.getBlobDigestsList()) + .build()); + responseObserver.onCompleted(); + } + }); + StubInstance instance = newStubInstance("findMissingBlobs-test"); + instance.maxRequestSize = 1024; + ImmutableList.Builder builder = ImmutableList.builder(); + // generates digest size * 1024 serialized size at least + for (int i = 0; i < 1024; i++) { + ByteString content = ByteString.copyFromUtf8("Hello, World! 
" + UUID.randomUUID()); + builder.add(DIGEST_UTIL.compute(content)); + } + ImmutableList digests = builder.build(); + assertThat(instance.findMissingBlobs(digests, RequestMetadata.getDefaultInstance()).get()) + .containsExactlyElementsIn(digests); + instance.stop(); + } + @Test public void outputStreamWrites() throws IOException, InterruptedException { AtomicReference writtenContent = new AtomicReference<>(); @@ -287,7 +321,7 @@ public void batchUpdateBlobs( ImmutableList digests = ImmutableList.of(DIGEST_UTIL.compute(first), DIGEST_UTIL.compute(last)); assertThat(instance.putAllBlobs(blobs, RequestMetadata.getDefaultInstance())) - .containsAllIn(digests); + .containsAtLeastElementsIn(digests); } @Test @@ -386,7 +420,7 @@ public void read(ReadRequest request, StreamObserver responseObser assertThat(ioException).isNotNull(); Status status = Status.fromThrowable(ioException); assertThat(status.getCode()).isEqualTo(Code.UNAVAILABLE); - verifyZeroInteractions(out); + verifyNoInteractions(out); instance.stop(); } @@ -458,7 +492,7 @@ public void read(ReadRequest request, StreamObserver responseObser assertThat(ioException).isNotNull(); Status status = Status.fromThrowable(ioException); assertThat(status.getCode()).isEqualTo(Code.DEADLINE_EXCEEDED); - verifyZeroInteractions(out); + verifyNoInteractions(out); instance.stop(); } @@ -477,7 +511,7 @@ public void readBlobInterchangeDoesNotRequestUntilStarted() { verify(mockBlobObserver, times(1)).setOnReadyHandler(onReadyCaptor.capture()); // call it onReadyCaptor.getValue().run(); - // verify zero interactions with mockRequestStream - verifyZeroInteractions(mockRequestStream); + // verify no more interactions with mockRequestStream + verifyNoMoreInteractions(mockRequestStream); } } diff --git a/src/test/java/build/buildfarm/metrics/BUILD b/src/test/java/build/buildfarm/metrics/BUILD index 8e5d3cace9..3d570580a9 100644 --- a/src/test/java/build/buildfarm/metrics/BUILD +++ b/src/test/java/build/buildfarm/metrics/BUILD @@ -8,14 +8,10 @@ java_test( "//src/main/java/build/buildfarm/common", "//src/main/java/build/buildfarm/common/config", "//src/main/java/build/buildfarm/metrics", - "//src/main/java/build/buildfarm/metrics/aws", "//src/main/java/build/buildfarm/metrics/log", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", "//src/test/java/build/buildfarm:test_runner", - "@googleapis//:google_rpc_error_details_java_proto", - "@maven//:com_amazonaws_aws_java_sdk_core", - "@maven//:com_amazonaws_aws_java_sdk_secretsmanager", - "@maven//:com_amazonaws_aws_java_sdk_sns", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_google_guava_guava", "@maven//:com_google_jimfs_jimfs", "@maven//:com_google_protobuf_protobuf_java", diff --git a/src/test/java/build/buildfarm/metrics/MetricsPublisherTest.java b/src/test/java/build/buildfarm/metrics/MetricsPublisherTest.java index 21c9685efd..a775f7744c 100644 --- a/src/test/java/build/buildfarm/metrics/MetricsPublisherTest.java +++ b/src/test/java/build/buildfarm/metrics/MetricsPublisherTest.java @@ -22,7 +22,6 @@ import build.bazel.remote.execution.v2.RequestMetadata; import build.buildfarm.common.config.BuildfarmConfigs; import build.buildfarm.common.config.Metrics; -import build.buildfarm.metrics.aws.AwsMetricsPublisher; import build.buildfarm.metrics.log.LogMetricsPublisher; import build.buildfarm.v1test.OperationRequestMetadata; import com.google.longrunning.Operation; @@ -42,14 +41,12 @@ public class MetricsPublisherTest { private final ExecuteOperationMetadata 
defaultExecuteOperationMetadata = ExecuteOperationMetadata.getDefaultInstance(); private final RequestMetadata defaultRequestMetadata = - RequestMetadata.getDefaultInstance() - .toBuilder() + RequestMetadata.getDefaultInstance().toBuilder() .setCorrelatedInvocationsId( "http://user@host-name?uuid_source\\u003d%2Fproc%2Fsys%2Fkernel%2Frandom%2Fuuid\\u0026OSTYPE\\u003dlinux-gnu#c09a5efa-f015-4d7b-b889-8ee0d097dff7") .build(); private final Operation defaultOperation = - Operation.getDefaultInstance() - .toBuilder() + Operation.getDefaultInstance().toBuilder() .setDone(true) .setName("shard/operations/123") .build(); @@ -69,19 +66,18 @@ public class MetricsPublisherTest { public void setUp() throws IOException { configs.getServer().setCloudRegion("test"); configs.getServer().setClusterId("buildfarm-test"); - configs.getServer().getMetrics().setPublisher(Metrics.PUBLISHER.AWS); + configs.getServer().getMetrics().setPublisher(Metrics.PUBLISHER.LOG); } @Test public void publishCompleteMetricsTest() throws InvalidProtocolBufferException { Operation operation = - defaultOperation - .toBuilder() + defaultOperation.toBuilder() .setResponse(Any.pack(defaultExecuteResponse)) .setMetadata(Any.pack(defaultExecuteOperationMetadata)) .build(); - AwsMetricsPublisher metricsPublisher = new AwsMetricsPublisher(); + LogMetricsPublisher metricsPublisher = new LogMetricsPublisher(); assertThat( AbstractMetricsPublisher.formatRequestMetadataToJson( metricsPublisher.populateRequestMetadata(operation, defaultRequestMetadata))) @@ -110,7 +106,7 @@ public void publishMetricsWithNoExecuteResponseTest() { Operation operation = defaultOperation.toBuilder().setMetadata(Any.pack(defaultExecuteOperationMetadata)).build(); - assertThat(new AwsMetricsPublisher().populateRequestMetadata(operation, defaultRequestMetadata)) + assertThat(new LogMetricsPublisher().populateRequestMetadata(operation, defaultRequestMetadata)) .isNotNull(); } @@ -119,7 +115,7 @@ public void publishMetricsWithNoExecuteOperationMetadataTest() { Operation operation = defaultOperation.toBuilder().setResponse(Any.pack(defaultExecuteResponse)).build(); - assertThat(new AwsMetricsPublisher().populateRequestMetadata(operation, defaultRequestMetadata)) + assertThat(new LogMetricsPublisher().populateRequestMetadata(operation, defaultRequestMetadata)) .isNotNull(); } @@ -129,13 +125,12 @@ public void preconditionFailureTest() { Status.getDefaultInstance().toBuilder().addDetails(Any.pack(preconditionFailure)).build(); Operation operation = - defaultOperation - .toBuilder() + defaultOperation.toBuilder() .setResponse(Any.pack(defaultExecuteResponse.toBuilder().setStatus(status).build())) .setMetadata(Any.pack(defaultExecuteOperationMetadata)) .build(); - assertThat(new AwsMetricsPublisher().populateRequestMetadata(operation, defaultRequestMetadata)) + assertThat(new LogMetricsPublisher().populateRequestMetadata(operation, defaultRequestMetadata)) .isNotNull(); } diff --git a/src/test/java/build/buildfarm/proxy/http/BUILD b/src/test/java/build/buildfarm/proxy/http/BUILD index 90fcb5d6c1..ddce214e13 100644 --- a/src/test/java/build/buildfarm/proxy/http/BUILD +++ b/src/test/java/build/buildfarm/proxy/http/BUILD @@ -7,15 +7,16 @@ java_test( "//src/main/java/build/buildfarm/common", "//src/main/java/build/buildfarm/proxy/http", "//src/test/java/build/buildfarm:test_runner", - "@googleapis//:google_bytestream_bytestream_java_grpc", - "@googleapis//:google_bytestream_bytestream_java_proto", + "@com_google_googleapis//google/bytestream:bytestream_java_grpc", + 
"@com_google_googleapis//google/bytestream:bytestream_java_proto", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", "@maven//:com_google_truth_truth", "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_core", + "@maven//:io_grpc_grpc_inprocess", "@maven//:io_grpc_grpc_stub", "@maven//:org_mockito_mockito_core", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) diff --git a/src/test/java/build/buildfarm/proxy/http/ByteStreamServiceTest.java b/src/test/java/build/buildfarm/proxy/http/ByteStreamServiceTest.java index ea57f15dc5..fd81cf4c49 100644 --- a/src/test/java/build/buildfarm/proxy/http/ByteStreamServiceTest.java +++ b/src/test/java/build/buildfarm/proxy/http/ByteStreamServiceTest.java @@ -90,7 +90,7 @@ public void tearDown() throws Exception { } static String createBlobUploadResourceName(String id, Digest digest) { - return createBlobUploadResourceName(/* instanceName=*/ "", id, digest); + return createBlobUploadResourceName(/* instanceName= */ "", id, digest); } static String createBlobUploadResourceName(String instanceName, String id, Digest digest) { @@ -100,7 +100,7 @@ static String createBlobUploadResourceName(String instanceName, String id, Diges } private String createBlobDownloadResourceName(Digest digest) { - return createBlobDownloadResourceName(/* instanceName=*/ "", digest); + return createBlobDownloadResourceName(/* instanceName= */ "", digest); } private String createBlobDownloadResourceName(String instanceName, Digest digest) { diff --git a/src/test/java/build/buildfarm/server/BUILD b/src/test/java/build/buildfarm/server/BUILD index ff60c29b94..1bb96f6674 100644 --- a/src/test/java/build/buildfarm/server/BUILD +++ b/src/test/java/build/buildfarm/server/BUILD @@ -18,12 +18,12 @@ java_test( "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_grpc", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", "//src/test/java/build/buildfarm:test_runner", + "//third_party/remote-apis:build_bazel_remote_execution_v2_remote_execution_java_grpc", + "@com_google_googleapis//google/bytestream:bytestream_java_grpc", + "@com_google_googleapis//google/bytestream:bytestream_java_proto", + "@com_google_googleapis//google/longrunning:longrunning_java_grpc", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@com_google_protobuf//:protobuf_java", - "@googleapis//:google_bytestream_bytestream_java_grpc", - "@googleapis//:google_bytestream_bytestream_java_proto", - "@googleapis//:google_longrunning_operations_java_grpc", - "@googleapis//:google_rpc_code_java_proto", - "@googleapis//:google_rpc_error_details_java_proto", "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", "@maven//:com_google_protobuf_protobuf_java_util", @@ -31,12 +31,12 @@ java_test( "@maven//:io_grpc_grpc_api", "@maven//:io_grpc_grpc_context", "@maven//:io_grpc_grpc_core", + "@maven//:io_grpc_grpc_inprocess", "@maven//:io_grpc_grpc_netty", "@maven//:io_grpc_grpc_protobuf", "@maven//:io_grpc_grpc_stub", "@maven//:me_dinowernli_java_grpc_prometheus", "@maven//:org_mockito_mockito_core", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_grpc", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) diff --git a/src/test/java/build/buildfarm/server/services/BUILD 
b/src/test/java/build/buildfarm/server/services/BUILD index 09916ed7f0..7c6d1b6b1e 100644 --- a/src/test/java/build/buildfarm/server/services/BUILD +++ b/src/test/java/build/buildfarm/server/services/BUILD @@ -11,10 +11,12 @@ java_test( "//src/main/java/build/buildfarm/server/services", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", "//src/test/java/build/buildfarm:test_runner", + "//third_party/remote-apis:build_bazel_remote_asset_v1_remote_asset_java_grpc", + "@maven//:com_google_guava_guava", "@maven//:com_google_protobuf_protobuf_java", "@maven//:com_google_truth_truth", "@maven//:io_grpc_grpc_stub", "@maven//:org_mockito_mockito_core", - "@remote_apis//:build_bazel_remote_asset_v1_remote_asset_java_proto", + "@remoteapis//build/bazel/remote/asset/v1:remote_asset_java_proto", ], ) diff --git a/src/test/java/build/buildfarm/worker/BUILD b/src/test/java/build/buildfarm/worker/BUILD index e8307784b6..cbd186e922 100644 --- a/src/test/java/build/buildfarm/worker/BUILD +++ b/src/test/java/build/buildfarm/worker/BUILD @@ -17,6 +17,7 @@ java_test( plugins = ["//src/main/java/build/buildfarm/common:lombok"], test_class = "build.buildfarm.AllTests", deps = [ + "//src/main/java/build/buildfarm/cas", "//src/main/java/build/buildfarm/common", "//src/main/java/build/buildfarm/common/config", "//src/main/java/build/buildfarm/instance", @@ -24,7 +25,7 @@ java_test( "//src/main/java/build/buildfarm/worker/resources", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", "//src/test/java/build/buildfarm:test_runner", - "@googleapis//:google_rpc_code_java_proto", + "@com_google_googleapis//google/rpc:rpc_java_proto", "@maven//:com_github_jnr_jnr_constants", "@maven//:com_github_jnr_jnr_ffi", "@maven//:com_github_serceman_jnr_fuse", @@ -38,6 +39,6 @@ java_test( "@maven//:io_grpc_grpc_protobuf", "@maven//:org_mockito_mockito_core", "@maven//:org_projectlombok_lombok", - "@remote_apis//:build_bazel_remote_execution_v2_remote_execution_java_proto", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", ], ) diff --git a/src/test/java/build/buildfarm/worker/DequeueMatchEvaluatorTest.java b/src/test/java/build/buildfarm/worker/DequeueMatchEvaluatorTest.java index bc7acd00a4..91f83872ca 100644 --- a/src/test/java/build/buildfarm/worker/DequeueMatchEvaluatorTest.java +++ b/src/test/java/build/buildfarm/worker/DequeueMatchEvaluatorTest.java @@ -14,13 +14,19 @@ package build.buildfarm.worker; +import static build.buildfarm.common.ExecutionProperties.CORES; +import static build.buildfarm.common.ExecutionProperties.MAX_CORES; +import static build.buildfarm.common.ExecutionProperties.MIN_CORES; +import static build.buildfarm.worker.DequeueMatchEvaluator.shouldKeepOperation; import static com.google.common.truth.Truth.assertThat; import build.bazel.remote.execution.v2.Platform; import build.buildfarm.common.config.BuildfarmConfigs; import build.buildfarm.v1test.QueueEntry; +import build.buildfarm.worker.resources.LocalResourceSet; import com.google.common.collect.HashMultimap; import com.google.common.collect.SetMultimap; +import java.util.concurrent.Semaphore; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; @@ -52,10 +58,11 @@ public class DequeueMatchEvaluatorTest { public void shouldKeepOperationKeepEmptyQueueEntry() throws Exception { // ARRANGE SetMultimap<String, String> workerProvisions = HashMultimap.create(); + LocalResourceSet resourceSet = new LocalResourceSet(); QueueEntry entry =
QueueEntry.newBuilder().setPlatform(Platform.newBuilder()).build(); // ACT - boolean shouldKeep = DequeueMatchEvaluator.shouldKeepOperation(workerProvisions, entry); + boolean shouldKeep = shouldKeepOperation(workerProvisions, resourceSet, entry); // ASSERT assertThat(shouldKeep).isTrue(); @@ -70,18 +77,33 @@ public void shouldKeepOperationKeepEmptyQueueEntry() throws Exception { public void shouldKeepOperationValidMinCoresQueueEntry() throws Exception { // ARRANGE SetMultimap<String, String> workerProvisions = HashMultimap.create(); - workerProvisions.put("cores", "11"); + LocalResourceSet resourceSet = new LocalResourceSet(); + workerProvisions.put(CORES, "11"); - QueueEntry entry = + QueueEntry minCoresEntry = QueueEntry.newBuilder() .setPlatform( Platform.newBuilder() .addProperties( - Platform.Property.newBuilder().setName("min-cores").setValue("10"))) + Platform.Property.newBuilder().setName(MIN_CORES).setValue("10"))) + .build(); + + // ACT + boolean shouldKeep = shouldKeepOperation(workerProvisions, resourceSet, minCoresEntry); + + // ASSERT + // the worker accepts because it has more cores than the min-cores requested + assertThat(shouldKeep).isTrue(); + + QueueEntry coresEntry = + QueueEntry.newBuilder() + .setPlatform( + Platform.newBuilder() + .addProperties(Platform.Property.newBuilder().setName(CORES).setValue("10"))) .build(); // ACT - boolean shouldKeep = DequeueMatchEvaluator.shouldKeepOperation(workerProvisions, entry); + shouldKeep = shouldKeepOperation(workerProvisions, resourceSet, coresEntry); // ASSERT // the worker accepts because it has more cores than the min-cores requested @@ -96,20 +118,34 @@ public void shouldKeepOperationValidMinCoresQueueEntry() throws Exception { @Test public void shouldKeepOperationInvalidMinCoresQueueEntry() throws Exception { // ARRANGE - configs.getWorker().getDequeueMatchSettings().setAcceptEverything(false); SetMultimap<String, String> workerProvisions = HashMultimap.create(); - workerProvisions.put("cores", "10"); + LocalResourceSet resourceSet = new LocalResourceSet(); + workerProvisions.put(CORES, "10"); - QueueEntry entry = + QueueEntry minCoresEntry = QueueEntry.newBuilder() .setPlatform( Platform.newBuilder() .addProperties( - Platform.Property.newBuilder().setName("min-cores").setValue("11"))) + Platform.Property.newBuilder().setName(MIN_CORES).setValue("11"))) .build(); // ACT - boolean shouldKeep = DequeueMatchEvaluator.shouldKeepOperation(workerProvisions, entry); + boolean shouldKeep = shouldKeepOperation(workerProvisions, resourceSet, minCoresEntry); + + // ASSERT + // the worker rejects because it has fewer cores than the min-cores requested + assertThat(shouldKeep).isFalse(); + + QueueEntry coresEntry = + QueueEntry.newBuilder() + .setPlatform( + Platform.newBuilder() + .addProperties(Platform.Property.newBuilder().setName(CORES).setValue("11"))) + .build(); + + // ACT + shouldKeep = shouldKeepOperation(workerProvisions, resourceSet, coresEntry); // ASSERT // the worker rejects because it has less cores than the min-cores requested @@ -123,24 +159,40 @@ public void shouldKeepOperationInvalidMinCoresQueueEntry() throws Exception { public void shouldKeepOperationMaxCoresDoNotInfluenceAcceptance() throws Exception { // ARRANGE SetMultimap<String, String> workerProvisions = HashMultimap.create(); - workerProvisions.put("cores", "10"); + LocalResourceSet resourceSet = new LocalResourceSet(); + workerProvisions.put(CORES, "10"); - QueueEntry entry = + QueueEntry minCoresEntry = QueueEntry.newBuilder() .setPlatform( Platform.newBuilder() +
.addProperties(Platform.Property.newBuilder().setName(MIN_CORES).setValue("10")) .addProperties( - Platform.Property.newBuilder().setName("min-cores").setValue("10")) - .addProperties( - Platform.Property.newBuilder().setName("max-cores").setValue("20"))) + Platform.Property.newBuilder().setName(MAX_CORES).setValue("20"))) .build(); // ACT - boolean shouldKeep = DequeueMatchEvaluator.shouldKeepOperation(workerProvisions, entry); + boolean shouldKeep = shouldKeepOperation(workerProvisions, resourceSet, minCoresEntry); // ASSERT // the worker accepts because it has the same cores as the min-cores requested assertThat(shouldKeep).isTrue(); + + QueueEntry coresEntry = + QueueEntry.newBuilder() + .setPlatform( + Platform.newBuilder() + .addProperties(Platform.Property.newBuilder().setName(CORES).setValue("10")) + .addProperties( + Platform.Property.newBuilder().setName(MAX_CORES).setValue("20"))) + .build(); + + // ACT + shouldKeep = shouldKeepOperation(workerProvisions, resourceSet, coresEntry); + + // ASSERT + // the worker accepts because it has the same cores as the cores requested + assertThat(shouldKeep).isTrue(); } // Function under test: shouldKeepOperation @@ -150,9 +202,9 @@ public void shouldKeepOperationMaxCoresDoNotInfluenceAcceptance() throws Excepti @Test public void shouldKeepOperationUnmatchedPropertiesRejectionAcceptance() throws Exception { // ARRANGE - configs.getWorker().getDequeueMatchSettings().setAcceptEverything(false); configs.getWorker().getDequeueMatchSettings().setAllowUnmatched(false); SetMultimap<String, String> workerProvisions = HashMultimap.create(); + LocalResourceSet resourceSet = new LocalResourceSet(); QueueEntry entry = QueueEntry.newBuilder() @@ -163,27 +215,204 @@ public void shouldKeepOperationUnmatchedPropertiesRejectionAcceptance() throws E .build(); // ACT - boolean shouldKeep = DequeueMatchEvaluator.shouldKeepOperation(workerProvisions, entry); + boolean shouldKeep = shouldKeepOperation(workerProvisions, resourceSet, entry); // ASSERT assertThat(shouldKeep).isFalse(); // ARRANGE - configs.getWorker().getDequeueMatchSettings().setAcceptEverything(true); + configs.getWorker().getDequeueMatchSettings().setAllowUnmatched(true); // ACT - shouldKeep = DequeueMatchEvaluator.shouldKeepOperation(workerProvisions, entry); + shouldKeep = shouldKeepOperation(workerProvisions, resourceSet, entry); // ASSERT assertThat(shouldKeep).isTrue(); + } + // Function under test: shouldKeepOperation + // Reason for testing: the local resource should be claimed + // Failure explanation: semaphore claim did not work as expected. + @Test + public void shouldKeepOperationClaimsResource() throws Exception { // ARRANGE configs.getWorker().getDequeueMatchSettings().setAllowUnmatched(true); + SetMultimap<String, String> workerProvisions = HashMultimap.create(); + LocalResourceSet resourceSet = new LocalResourceSet(); + resourceSet.resources.put("FOO", new Semaphore(1)); + + QueueEntry entry = + QueueEntry.newBuilder() + .setPlatform( + Platform.newBuilder() + .addProperties( + Platform.Property.newBuilder().setName("resource:FOO").setValue("1"))) + .build(); + + // PRE-ASSERT + assertThat(resourceSet.resources.get("FOO").availablePermits()).isEqualTo(1); // ACT - shouldKeep = DequeueMatchEvaluator.shouldKeepOperation(workerProvisions, entry); + boolean shouldKeep = shouldKeepOperation(workerProvisions, resourceSet, entry); // ASSERT + // the worker accepts because the resource is available.
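+ // the claim presumably maps to a tryAcquire on the "FOO" semaphore (a hedged reading of shouldKeepOperation, not a verified implementation detail), so acceptance consumes the single permit immediately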
assertThat(shouldKeep).isTrue(); + assertThat(resourceSet.resources.get("FOO").availablePermits()).isEqualTo(0); + + // ACT + shouldKeep = shouldKeepOperation(workerProvisions, resourceSet, entry); + + // ASSERT + // the worker rejects because there are no resources left. + assertThat(shouldKeep).isFalse(); + assertThat(resourceSet.resources.get("FOO").availablePermits()).isEqualTo(0); + } + + // Function under test: shouldKeepOperation + // Reason for testing: a rejected operation should leave the local resource unclaimed + // Failure explanation: the semaphore was claimed despite the rejection. + @Test + public void rejectOperationIgnoresResource() throws Exception { + // ARRANGE + configs.getWorker().getDequeueMatchSettings().setAllowUnmatched(false); + SetMultimap<String, String> workerProvisions = HashMultimap.create(); + LocalResourceSet resourceSet = new LocalResourceSet(); + resourceSet.resources.put("FOO", new Semaphore(1)); + + QueueEntry entry = + QueueEntry.newBuilder() + .setPlatform( + Platform.newBuilder() + .addProperties( + Platform.Property.newBuilder().setName("resource:FOO").setValue("1")) + .addProperties(Platform.Property.newBuilder().setName("os").setValue("randos"))) + .build(); + + // PRE-ASSERT + assertThat(resourceSet.resources.get("FOO").availablePermits()).isEqualTo(1); + + // ACT + boolean shouldKeep = shouldKeepOperation(workerProvisions, resourceSet, entry); + + // ASSERT + // the worker rejects because the os property is not satisfied + assertThat(shouldKeep).isFalse(); + assertThat(resourceSet.resources.get("FOO").availablePermits()).isEqualTo(1); + } + + // Function under test: shouldKeepOperation + // Reason for testing: the local resources should be claimed + // Failure explanation: semaphore claim did not work as expected. + @Test + public void shouldKeepOperationClaimsMultipleResource() throws Exception { + // ARRANGE + configs.getWorker().getDequeueMatchSettings().setAllowUnmatched(true); + SetMultimap<String, String> workerProvisions = HashMultimap.create(); + LocalResourceSet resourceSet = new LocalResourceSet(); + resourceSet.resources.put("FOO", new Semaphore(2)); + resourceSet.resources.put("BAR", new Semaphore(4)); + + QueueEntry entry = + QueueEntry.newBuilder() + .setPlatform( + Platform.newBuilder() + .addProperties( + Platform.Property.newBuilder().setName("resource:FOO").setValue("1")) + .addProperties( + Platform.Property.newBuilder().setName("resource:BAR").setValue("2"))) + .build(); + + // PRE-ASSERT + assertThat(resourceSet.resources.get("FOO").availablePermits()).isEqualTo(2); + assertThat(resourceSet.resources.get("BAR").availablePermits()).isEqualTo(4); + + // ACT + boolean shouldKeep = shouldKeepOperation(workerProvisions, resourceSet, entry); + + // ASSERT + // the worker accepts because the resource is available. + assertThat(shouldKeep).isTrue(); + assertThat(resourceSet.resources.get("FOO").availablePermits()).isEqualTo(1); + assertThat(resourceSet.resources.get("BAR").availablePermits()).isEqualTo(2); + + // ACT + shouldKeep = shouldKeepOperation(workerProvisions, resourceSet, entry); + + // ASSERT + // the worker accepts because the resource is available. + assertThat(shouldKeep).isTrue(); + assertThat(resourceSet.resources.get("FOO").availablePermits()).isEqualTo(0); + assertThat(resourceSet.resources.get("BAR").availablePermits()).isEqualTo(0); + + // ACT + shouldKeep = shouldKeepOperation(workerProvisions, resourceSet, entry); + + // ASSERT + // the worker rejects because there are no resources left.
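+ // a failed claim should also return any permits it already acquired; shouldKeepOperationFailsToClaimSameAmountRemains below verifies that rollback directly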
+ assertThat(shouldKeep).isFalse(); + assertThat(resourceSet.resources.get("FOO").availablePermits()).isEqualTo(0); + assertThat(resourceSet.resources.get("BAR").availablePermits()).isEqualTo(0); + } + + // Function under test: shouldKeepOperation + // Reason for testing: when any resource claim fails, the available amounts should be left + // unchanged. + // Failure explanation: semaphore claim did not work as expected. + @Test + public void shouldKeepOperationFailsToClaimSameAmountRemains() throws Exception { + // ARRANGE + configs.getWorker().getDequeueMatchSettings().setAllowUnmatched(true); + SetMultimap<String, String> workerProvisions = HashMultimap.create(); + LocalResourceSet resourceSet = new LocalResourceSet(); + resourceSet.resources.put("FOO", new Semaphore(50)); + resourceSet.resources.put("BAR", new Semaphore(100)); + resourceSet.resources.put("BAZ", new Semaphore(200)); + + QueueEntry entry = + QueueEntry.newBuilder() + .setPlatform( + Platform.newBuilder() + .addProperties( + Platform.Property.newBuilder().setName("resource:FOO").setValue("20")) + .addProperties( + Platform.Property.newBuilder().setName("resource:BAR").setValue("101")) + .addProperties( + Platform.Property.newBuilder().setName("resource:BAZ").setValue("20"))) + .build(); + + // PRE-ASSERT + assertThat(resourceSet.resources.get("FOO").availablePermits()).isEqualTo(50); + assertThat(resourceSet.resources.get("BAR").availablePermits()).isEqualTo(100); + assertThat(resourceSet.resources.get("BAZ").availablePermits()).isEqualTo(200); + + // ACT + boolean shouldKeep = shouldKeepOperation(workerProvisions, resourceSet, entry); + + // ASSERT + // the worker rejects because the BAR request exceeds its available permits. + // The partially claimed amounts are returned. + assertThat(shouldKeep).isFalse(); + assertThat(resourceSet.resources.get("FOO").availablePermits()).isEqualTo(50); + assertThat(resourceSet.resources.get("BAR").availablePermits()).isEqualTo(100); + assertThat(resourceSet.resources.get("BAZ").availablePermits()).isEqualTo(200); + } + + @Test + public void shouldMatchCoresAsMinAndMax() throws Exception { + SetMultimap<String, String> workerProvisions = HashMultimap.create(); + LocalResourceSet resourceSet = new LocalResourceSet(); + configs.getWorker().getDequeueMatchSettings().setAllowUnmatched(false); + + QueueEntry multicoreEntry = + QueueEntry.newBuilder() + .setPlatform( + Platform.newBuilder() + .addProperties(Platform.Property.newBuilder().setName(CORES).setValue("2")) + .build()) + .build(); + + // cores must be present in worker provisions to keep an entry that specifies cores in its platform + assertThat(shouldKeepOperation(workerProvisions, resourceSet, multicoreEntry)).isFalse(); } } diff --git a/src/test/java/build/buildfarm/worker/ExecuteActionStageTest.java b/src/test/java/build/buildfarm/worker/ExecuteActionStageTest.java index 284c08d34d..c22178c4f9 100644 --- a/src/test/java/build/buildfarm/worker/ExecuteActionStageTest.java +++ b/src/test/java/build/buildfarm/worker/ExecuteActionStageTest.java @@ -44,7 +44,7 @@ public void errorPathDestroysExecDir() throws Exception { .setExecDir(Paths.get("error-operation-path")) .build(); - PipelineStage executeActionStage = new ExecuteActionStage(context, /* output=*/ null, error); + PipelineStage executeActionStage = new ExecuteActionStage(context, /* output= */ null, error); executeActionStage.error().put(errorContext); verify(context, times(1)).destroyExecDir(errorContext.execDir); verify(error, times(1)).put(errorContext); diff --git a/src/test/java/build/buildfarm/worker/FuseCASTest.java
b/src/test/java/build/buildfarm/worker/FuseCASTest.java index 98534fecf5..5d1319acaa 100644 --- a/src/test/java/build/buildfarm/worker/FuseCASTest.java +++ b/src/test/java/build/buildfarm/worker/FuseCASTest.java @@ -15,6 +15,7 @@ package build.buildfarm.worker; import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; import build.bazel.remote.execution.v2.Digest; import build.bazel.remote.execution.v2.Directory; @@ -29,9 +30,7 @@ import jnr.ffi.provider.DelegatingMemoryIO; import jnr.ffi.provider.converters.StringResultConverter; import org.junit.Before; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; import ru.serce.jnrfuse.ErrorCodes; @@ -42,8 +41,6 @@ public class FuseCASTest { private FuseCAS fuseCAS; - @Rule public final ExpectedException exception = ExpectedException.none(); - private final ByteString content = ByteString.copyFromUtf8("Peanut Butter"); @Before @@ -97,30 +94,34 @@ public void createInputRootMakesDirectory() throws IOException, InterruptedExcep assertThat(fuseCAS.getattr("/test", createFileStat())).isEqualTo(0); } - @Test + @Test(expected = IllegalArgumentException.class) public void createInputRootEmptyTopdirThrows() throws IOException, InterruptedException { - exception.expect(IllegalArgumentException.class); fuseCAS.createInputRoot("", Digest.newBuilder().build()); } - @Test + @Test(expected = IllegalArgumentException.class) public void createInputRootEmptyAfterSlashes() throws IOException, InterruptedException { - exception.expect(IllegalArgumentException.class); fuseCAS.createInputRoot("///", Digest.newBuilder().build()); } @Test public void createInputRootFileAsDirectoryThrows() throws IOException, InterruptedException { fuseCAS.createInputRoot("test", Digest.newBuilder().setHash("/test").build()); - exception.expect(IllegalArgumentException.class); - fuseCAS.createInputRoot("test/file/subdir", Digest.newBuilder().build()); + assertThrows( + IllegalArgumentException.class, + () -> { + fuseCAS.createInputRoot("test/file/subdir", Digest.newBuilder().build()); + }); } @Test public void createInputRootEmptyComponentsIgnored() throws IOException, InterruptedException { fuseCAS.createInputRoot("/test/", Digest.newBuilder().setHash("/test").build()); - exception.expect(IllegalArgumentException.class); - fuseCAS.createInputRoot("test/file/subdir", Digest.newBuilder().build()); + assertThrows( + IllegalArgumentException.class, + () -> { + fuseCAS.createInputRoot("test/file/subdir", Digest.newBuilder().build()); + }); } @Test @@ -370,14 +371,14 @@ public void writeExtendsAndOverwrites() { ByteString data = ByteString.copyFromUtf8("Hello, World\n"); Pointer buf = pointerFromByteString(data); - assertThat(fuseCAS.write("/foo", buf, data.size(), /* offset=*/ 0, fi)).isEqualTo(data.size()); + assertThat(fuseCAS.write("/foo", buf, data.size(), /* offset= */ 0, fi)).isEqualTo(data.size()); FileStat fileStat = createFileStat(); fuseCAS.getattr("/foo", fileStat); assertThat(fileStat.st_size.longValue()).isEqualTo(data.size()); ByteString overwriteData = ByteString.copyFromUtf8("Goodbye"); Pointer overwriteBuf = pointerFromByteString(overwriteData); - fuseCAS.write("/foo", overwriteBuf, overwriteData.size(), /* offset=*/ 0, fi); + fuseCAS.write("/foo", overwriteBuf, overwriteData.size(), /* offset= */ 0, fi); fuseCAS.getattr("/foo", fileStat); assertThat(fileStat.st_size.longValue()).isEqualTo(data.size()); } @@ -403,7 +404,8 @@ 
public void readAtEndIsEmpty() { FuseFileInfo fi = new SystemFuseFileInfo(); //noinspection OctalInteger fuseCAS.create("/foo", 0644, fi); - assertThat(fuseCAS.read("/foo", /* buf=*/ null, /* size=*/ 1, /* offset=*/ 0, fi)).isEqualTo(0); + assertThat(fuseCAS.read("/foo", /* buf= */ null, /* size= */ 1, /* offset= */ 0, fi)) + .isEqualTo(0); } @Test @@ -414,12 +416,12 @@ public void readWritten() { ByteString data = ByteString.copyFromUtf8("Hello, World\n"); Pointer buf = pointerFromByteString(data); - fuseCAS.write("/foo", buf, data.size(), /* offset=*/ 0, fi); + fuseCAS.write("/foo", buf, data.size(), /* offset= */ 0, fi); byte[] readData = new byte[5]; u8[] array = Struct.arrayOf(Runtime.getSystemRuntime(), u8.class, readData.length); Pointer readBuf = ((DelegatingMemoryIO) Struct.getMemory(array[0])).getDelegatedMemoryIO(); - assertThat(fuseCAS.read("/foo", readBuf, /* size=*/ readData.length, /* offset=*/ 7, fi)) + assertThat(fuseCAS.read("/foo", readBuf, /* size= */ readData.length, /* offset= */ 7, fi)) .isEqualTo(readData.length); readBuf.get(0, readData, 0, readData.length); assertThat(new String(readData, 0)).isEqualTo("World"); @@ -435,7 +437,8 @@ public void readInputRooted() throws IOException, InterruptedException { fi.flags.set(0); assertThat(fuseCAS.open("/test/file", fi)).isEqualTo(0); assertThat( - fuseCAS.read("/test/file", /* buf=*/ buf, /* size=*/ data.length, /* offset=*/ 0, fi)) + fuseCAS.read( + "/test/file", /* buf= */ buf, /* size= */ data.length, /* offset= */ 0, fi)) .isEqualTo(data.length); buf.get(0, data, 0, data.length); assertThat(new String(data, 0)).isEqualTo("Peanut"); @@ -447,8 +450,8 @@ public void fallocatePunchHole() { fuseCAS.fallocate( "/op_not_supp", /* FALLOC_FL_PUNCH_HOLE */ 2, - /* off=*/ -1, - /* length=*/ -1, + /* off= */ -1, + /* length= */ -1, new SystemFuseFileInfo())) .isEqualTo(-ErrorCodes.EOPNOTSUPP()); } @@ -458,7 +461,7 @@ public void fallocateDirectory() { fuseCAS.mkdir("/foo", 755); assertThat( fuseCAS.fallocate( - "/foo", /* mode=*/ 0, /* off=*/ -1, /* length=*/ -1, new SystemFuseFileInfo())) + "/foo", /* mode= */ 0, /* off= */ -1, /* length= */ -1, new SystemFuseFileInfo())) .isEqualTo(-ErrorCodes.EISDIR()); } @@ -468,9 +471,9 @@ public void fallocateReadOnly() throws IOException, InterruptedException { assertThat( fuseCAS.fallocate( "/test/file", - /* mode=*/ 0, - /* off=*/ -1, - /* length=*/ -1, + /* mode= */ 0, + /* off= */ -1, + /* length= */ -1, new SystemFuseFileInfo())) .isEqualTo(-ErrorCodes.EPERM()); } @@ -481,7 +484,7 @@ public void fallocateResize() { fuseCAS.create("/foo", 0644, new SystemFuseFileInfo()); assertThat( fuseCAS.fallocate( - "/foo", /* mode=*/ 0, /* off=*/ 0, /* length=*/ 1024, new SystemFuseFileInfo())) + "/foo", /* mode= */ 0, /* off= */ 0, /* length= */ 1024, new SystemFuseFileInfo())) .isEqualTo(0); FileStat fileStat = createFileStat(); fuseCAS.getattr("/foo", fileStat); diff --git a/src/test/java/build/buildfarm/worker/InputFetcherTest.java b/src/test/java/build/buildfarm/worker/InputFetcherTest.java new file mode 100644 index 0000000000..eac2abdd67 --- /dev/null +++ b/src/test/java/build/buildfarm/worker/InputFetcherTest.java @@ -0,0 +1,135 @@ +// Copyright 2018 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package build.buildfarm.worker; + +import static build.buildfarm.common.Errors.VIOLATION_TYPE_MISSING; +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.truth.Truth.assertThat; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +import build.bazel.remote.execution.v2.Action; +import build.bazel.remote.execution.v2.Command; +import build.bazel.remote.execution.v2.Digest; +import build.bazel.remote.execution.v2.Directory; +import build.bazel.remote.execution.v2.ExecuteResponse; +import build.buildfarm.cas.cfc.PutDirectoryException; +import build.buildfarm.v1test.ExecuteEntry; +import build.buildfarm.v1test.QueueEntry; +import build.buildfarm.v1test.QueuedOperation; +import build.buildfarm.worker.ExecDirException.ViolationException; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Iterables; +import com.google.longrunning.Operation; +import com.google.protobuf.Any; +import com.google.rpc.Code; +import com.google.rpc.DebugInfo; +import com.google.rpc.Help; +import com.google.rpc.LocalizedMessage; +import com.google.rpc.PreconditionFailure; +import com.google.rpc.RequestInfo; +import com.google.rpc.ResourceInfo; +import com.google.rpc.Status; +import java.io.IOException; +import java.nio.file.NoSuchFileException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class InputFetcherTest { + @Test + public void onlyMissingFilesIsViolationMissingFailedPrecondition() throws Exception { + PipelineStage error = mock(PipelineStage.class); + Operation operation = Operation.newBuilder().setName("missing-inputs").build(); + ExecuteEntry executeEntry = + ExecuteEntry.newBuilder().setOperationName(operation.getName()).build(); + QueueEntry queueEntry = QueueEntry.newBuilder().setExecuteEntry(executeEntry).build(); + OperationContext operationContext = + OperationContext.newBuilder().setQueueEntry(queueEntry).setOperation(operation).build(); + Command command = Command.newBuilder().addArguments("/bin/false").build(); + QueuedOperation queuedOperation = QueuedOperation.newBuilder().setCommand(command).build(); + AtomicReference<Operation> failedOperationRef = new AtomicReference<>(); + WorkerContext workerContext = + new StubWorkerContext() { + @Override + public QueuedOperation getQueuedOperation(QueueEntry queueEntry) { + return queuedOperation; + } + + @Override + public boolean putOperation(Operation operation) { + return failedOperationRef.compareAndSet(null, operation); + } + + @Override + public Path createExecDir( + String operationName, + Map<Digest, Directory> directoriesIndex, + Action action, + Command command) + throws IOException { + Path root = Paths.get(operationName); + throw new ExecDirException( + Paths.get(operationName), + ImmutableList.of( + new ViolationException( + Digest.getDefaultInstance(),
+ root.resolve("input"), + /* isExecutable= */ false, + new NoSuchFileException("input-digest")), + new PutDirectoryException( + root.resolve("dir"), + Digest.getDefaultInstance(), + ImmutableList.of(new NoSuchFileException("dir/input-digest"))))); + } + + @Override + public int getInputFetchStageWidth() { + return 1; + } + }; + InputFetchStage owner = new InputFetchStage(workerContext, /* output= */ null, error); + InputFetcher inputFetcher = new InputFetcher(workerContext, operationContext, owner); + inputFetcher.fetchPolled(/* stopwatch= */ null); + Operation failedOperation = checkNotNull(failedOperationRef.get()); + verify(error, times(1)).put(any(OperationContext.class)); + ExecuteResponse executeResponse = failedOperation.getResponse().unpack(ExecuteResponse.class); + Status status = executeResponse.getStatus(); + assertThat(status.getCode()).isEqualTo(Code.FAILED_PRECONDITION.getNumber()); + for (Any detail : status.getDetailsList()) { + if (!(detail.is(DebugInfo.class) + || detail.is(Help.class) + || detail.is(LocalizedMessage.class) + || detail.is(RequestInfo.class) + || detail.is(ResourceInfo.class))) { + assertThat(detail.is(PreconditionFailure.class)).isTrue(); + PreconditionFailure preconditionFailure = detail.unpack(PreconditionFailure.class); + assertThat(preconditionFailure.getViolationsCount()).isGreaterThan(0); + assertThat( + Iterables.all( + preconditionFailure.getViolationsList(), + violation -> violation.getType().equals(VIOLATION_TYPE_MISSING))) + .isTrue(); + } + } + } +} diff --git a/src/test/java/build/buildfarm/worker/PipelineStageTest.java b/src/test/java/build/buildfarm/worker/PipelineStageTest.java index ef49bab367..c89ed66a50 100644 --- a/src/test/java/build/buildfarm/worker/PipelineStageTest.java +++ b/src/test/java/build/buildfarm/worker/PipelineStageTest.java @@ -96,7 +96,8 @@ OperationContext take() { }; Thread stageThread = new Thread(stage); stageThread.start(); - while (count.get() != 1) ; + while (count.get() != 1) + ; stage.cancelTick(); stageThread.join(); assertThat(count.get()).isEqualTo(2); diff --git a/src/test/java/build/buildfarm/worker/PipelineTest.java b/src/test/java/build/buildfarm/worker/PipelineTest.java index 713f1b0345..866f6f19e3 100644 --- a/src/test/java/build/buildfarm/worker/PipelineTest.java +++ b/src/test/java/build/buildfarm/worker/PipelineTest.java @@ -14,6 +14,11 @@ package build.buildfarm.worker; +import static com.google.common.truth.Truth.assertThat; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.logging.Logger; import lombok.extern.java.Log; import org.junit.Test; @@ -53,7 +58,7 @@ public void stageThreadReturnCompletesJoin() throws InterruptedException { public void run() {} }, 1); - pipeline.start(null); + pipeline.start(); pipeline.join(); } @@ -68,7 +73,97 @@ public void run() { } }, 1); - pipeline.start(null); + pipeline.start(); + pipeline.join(); + } + + // Create a test stage that exists because of an interrupt. + // This proves the stage can be interupted. 
+ public class TestStage extends PipelineStage { + public TestStage(String name) { + super(name, null, null, null); + } + + @Override + protected void runInterruptible() throws InterruptedException { + throw new InterruptedException("Interrupt"); + } + + @Override + public void put(OperationContext operationContext) throws InterruptedException {} + + @Override + OperationContext take() { + throw new UnsupportedOperationException(); + } + + @Override + public Logger getLogger() { + return log; + } + } + + // This test demonstrates that the stage will end and the pipeline will finish because it was + // interrupted. + @Test + public void stageExitsOnInterrupt() throws InterruptedException { + Pipeline pipeline = new Pipeline(); + TestStage stage = new TestStage("test"); + pipeline.add(stage, 1); + pipeline.start(); pipeline.join(); } + + // Create a test stage that doesn't exit because of a non-interrupt exception. + // This proves the stage is robust enough to continue running when experiencing an exception. + public class ContinueStage extends PipelineStage { + public ContinueStage(String name) { + super(name, null, null, null); + } + + @Override + protected void runInterruptible() throws InterruptedException { + throw new RuntimeException("Exception"); + } + + @Override + public void put(OperationContext operationContext) throws InterruptedException {} + + @Override + OperationContext take() { + throw new UnsupportedOperationException(); + } + + @Override + public Logger getLogger() { + return log; + } + } + + // This test demonstrates that the stage will NOT end and the pipeline will NOT finish because a + // non-interrupt exception was thrown. + @Test + public void stageContinuesOnException() throws InterruptedException { + Pipeline pipeline = new Pipeline(); + ContinueStage stage = new ContinueStage("test"); + pipeline.add(stage, 1); + pipeline.start(); + + boolean didNotThrow = false; + try { + CompletableFuture.runAsync( + () -> { + try { + pipeline.join(); + } catch (InterruptedException e) { + } + return; + }) + .get(1, TimeUnit.SECONDS); + } catch (TimeoutException e) { + didNotThrow = true; + } catch (Exception e) { + } + assertThat(didNotThrow).isTrue(); + } } diff --git a/src/test/java/build/buildfarm/worker/ReportResultStageTest.java b/src/test/java/build/buildfarm/worker/ReportResultStageTest.java index 2586e0dc33..b5f2da06d8 100644 --- a/src/test/java/build/buildfarm/worker/ReportResultStageTest.java +++ b/src/test/java/build/buildfarm/worker/ReportResultStageTest.java @@ -57,7 +57,7 @@ static class SingleOutputSink extends PipelineStage { OperationContext operationContext = null; public SingleOutputSink() { - super("SingleOutputSink", /* workerContext=*/ null, /* output=*/ null, /* error=*/ null); + super("SingleOutputSink", /* workerContext= */ null, /* output= */ null, /* error= */ null); } @Override @@ -110,7 +110,7 @@ public void execDirDestroyedAfterComplete() throws Exception { .build(); when(context.putOperation(any(Operation.class))).thenReturn(true); - PipelineStage reportResultStage = new ReportResultStage(context, output, /* error=*/ null); + PipelineStage reportResultStage = new ReportResultStage(context, output, /* error= */ null); reportResultStage.put(reportedContext); reportResultStage.run(); verify(context, times(1)).destroyExecDir(reportedContext.execDir); @@ -159,7 +159,7 @@ public void operationErrorOnStatusException() throws Exception { eq(erroringContext.execDir), eq(Command.getDefaultInstance())); - PipelineStage reportResultStage = new
ReportResultStage(context, output, /* error=*/ null); + PipelineStage reportResultStage = new ReportResultStage(context, output, /* error= */ null); reportResultStage.put(erroringContext); reportResultStage.run(); verify(context, times(1)).destroyExecDir(erroringContext.execDir); diff --git a/src/test/java/build/buildfarm/worker/StubWorkerContext.java b/src/test/java/build/buildfarm/worker/StubWorkerContext.java index 1b0272a0b6..7ccf1182d3 100644 --- a/src/test/java/build/buildfarm/worker/StubWorkerContext.java +++ b/src/test/java/build/buildfarm/worker/StubWorkerContext.java @@ -34,6 +34,7 @@ import com.google.longrunning.Operation; import com.google.protobuf.Duration; import io.grpc.Deadline; +import java.io.IOException; import java.nio.file.Path; import java.util.List; import java.util.Map; @@ -142,10 +143,8 @@ public QueuedOperation getQueuedOperation(QueueEntry queueEntry) { @Override public Path createExecDir( - String operationName, - Map<Digest, Directory> directoriesIndex, - Action action, - Command command) { + String operationName, Map<Digest, Directory> directoriesIndex, Action action, Command command) + throws IOException, InterruptedException { throw new UnsupportedOperationException(); } @@ -223,4 +222,9 @@ public ResourceLimits commandExecutionSettings(Command command) { public boolean shouldErrorOperationOnRemainingResources() { throw new UnsupportedOperationException(); } + + @Override + public void returnLocalResources(QueueEntry queueEntry) { + throw new UnsupportedOperationException(); + } } diff --git a/src/test/java/build/buildfarm/worker/SuperscalarPipelineStageTest.java b/src/test/java/build/buildfarm/worker/SuperscalarPipelineStageTest.java index 60cb0d8be9..5ded1c590c 100644 --- a/src/test/java/build/buildfarm/worker/SuperscalarPipelineStageTest.java +++ b/src/test/java/build/buildfarm/worker/SuperscalarPipelineStageTest.java @@ -72,12 +72,17 @@ protected int claimsRequired(OperationContext operationContext) { boolean isFull() { return claims.size() == width; } + + @Override + public int getSlotUsage() { + return 0; + } } @Test public void interruptedClaimReleasesPartial() throws InterruptedException { AbstractSuperscalarPipelineStage stage = - new AbstractSuperscalarPipelineStage("too-narrow", /* output=*/ null, /* width=*/ 3) { + new AbstractSuperscalarPipelineStage("too-narrow", /* output= */ null, /* width= */ 3) { @Override protected int claimsRequired(OperationContext operationContext) { return 5; @@ -102,7 +107,7 @@ protected int claimsRequired(OperationContext operationContext) { // start a thread, when the stage is exhausted, interrupt this one try { - stage.claim(/* operationContext=*/ null); + stage.claim(/* operationContext= */ null); fail("should not get here"); } catch (InterruptedException e) { // ignore @@ -121,7 +126,7 @@ public void takeReleasesQueueClaims() throws InterruptedException { BlockingQueue<OperationContext> queue = new ArrayBlockingQueue<>(1); PipelineStage output = new PipelineStageTest.StubPipelineStage("unclosed-sink"); PipelineStage stage = - new AbstractSuperscalarPipelineStage("queue-claimed", output, /* width=*/ 3) { + new AbstractSuperscalarPipelineStage("queue-claimed", output, /* width= */ 3) { @Override protected int claimsRequired(OperationContext operationContext) { return 2; diff --git a/src/test/java/build/buildfarm/worker/persistent/BUILD b/src/test/java/build/buildfarm/worker/persistent/BUILD new file mode 100644 index 0000000000..b7d0c4d056 --- /dev/null +++ b/src/test/java/build/buildfarm/worker/persistent/BUILD @@ -0,0 +1,36 @@ +java_test( + name = "tests", + size =
"small", + srcs = glob(["*.java"]), + test_class = "build.buildfarm.AllTests", + deps = [ + "//persistentworkers/src/main/java/persistent/bazel:bazel-persistent-workers", + "//persistentworkers/src/main/java/persistent/common:persistent-common", + "//persistentworkers/src/main/java/persistent/common/util", + "//persistentworkers/src/main/protobuf:worker_protocol_java_proto", + "//src/main/java/build/buildfarm/common", + "//src/main/java/build/buildfarm/common/config", + "//src/main/java/build/buildfarm/instance", + "//src/main/java/build/buildfarm/worker", + "//src/main/java/build/buildfarm/worker/persistent", + "//src/main/java/build/buildfarm/worker/resources", + "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", + "//src/test/java/build/buildfarm:test_runner", + "//src/test/java/build/buildfarm/worker/util:worker_test_utils", + "@com_google_googleapis//google/rpc:rpc_java_proto", + "@maven//:com_github_jnr_jnr_constants", + "@maven//:com_github_jnr_jnr_ffi", + "@maven//:com_github_serceman_jnr_fuse", + "@maven//:com_google_guava_guava", + "@maven//:com_google_jimfs_jimfs", + "@maven//:com_google_protobuf_protobuf_java", + "@maven//:com_google_truth_truth", + "@maven//:io_grpc_grpc_api", + "@maven//:io_grpc_grpc_context", + "@maven//:io_grpc_grpc_core", + "@maven//:io_grpc_grpc_protobuf", + "@maven//:org_mockito_mockito_core", + "@maven//:org_projectlombok_lombok", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", + ], +) diff --git a/src/test/java/build/buildfarm/worker/persistent/ProtoCoordinatorTest.java b/src/test/java/build/buildfarm/worker/persistent/ProtoCoordinatorTest.java new file mode 100644 index 0000000000..9f6db2609d --- /dev/null +++ b/src/test/java/build/buildfarm/worker/persistent/ProtoCoordinatorTest.java @@ -0,0 +1,131 @@ +// Copyright 2023 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package build.buildfarm.worker.persistent; + +import build.bazel.remote.execution.v2.Command; +import build.buildfarm.v1test.Tree; +import build.buildfarm.worker.util.WorkerTestUtils; +import build.buildfarm.worker.util.WorkerTestUtils.TreeFile; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Iterables; +import com.google.common.jimfs.Configuration; +import com.google.common.jimfs.Jimfs; +import com.google.devtools.build.lib.worker.WorkerProtocol.Input; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import persistent.bazel.client.PersistentWorker; +import persistent.bazel.client.WorkerKey; + +@RunWith(JUnit4.class) +public class ProtoCoordinatorTest { + private WorkerKey makeWorkerKey( + WorkFilesContext ctx, WorkerInputs workerFiles, Path workRootsDir) { + return Keymaker.make( + ctx.opRoot, + workRootsDir, + ImmutableList.of("workerExecCmd"), + ImmutableList.of("workerInitArgs"), + ImmutableMap.of(), + "executionName", + workerFiles); + } + + private Path rootDir = null; + + public Path jimFsRoot() { + if (rootDir == null) { + rootDir = + Iterables.getFirst( + Jimfs.newFileSystem( + Configuration.unix().toBuilder() + .setAttributeViews("basic", "owner", "posix", "unix") + .build()) + .getRootDirectories(), + null); + } + return rootDir; + } + + @Test + public void testProtoCoordinator() throws Exception { + ProtoCoordinator pc = ProtoCoordinator.ofCommonsPool(4); + + Path fsRoot = jimFsRoot(); + Path opRoot = fsRoot.resolve("opRoot"); + assert (Files.notExists(opRoot)); + Files.createDirectory(opRoot); + + assert (Files.exists(opRoot)); + + String treeRootDir = opRoot.toString(); + List<TreeFile> fileInputs = + ImmutableList.of( + new TreeFile("file_1", "file contents 1"), + new TreeFile("subdir/subdir_file_2", "file contents 2"), + new TreeFile("tools_dir/tool_file", "tool file contents", true), + new TreeFile("tools_dir/tool_file_2", "tool file contents 2", true)); + + Tree tree = WorkerTestUtils.makeTree(treeRootDir, fileInputs); + + Command command = WorkerTestUtils.makeCommand(); + WorkFilesContext ctx = WorkFilesContext.fromContext(opRoot, tree, command); + ImmutableList<String> requestArgs = ImmutableList.of("reqArg1"); + + WorkerInputs workerFiles = WorkerInputs.from(ctx, requestArgs); + + for (Map.Entry<Path, Input> entry : workerFiles.allInputs.entrySet()) { + Path file = entry.getKey(); + Files.createDirectories(file.getParent()); + Files.createFile(file); + } + + WorkerKey key = makeWorkerKey(ctx, workerFiles, fsRoot.resolve("workRootsDir")); + + Path workRoot = key.getExecRoot(); + Path toolsRoot = workRoot.resolve(PersistentWorker.TOOL_INPUT_SUBDIR); + + pc.copyToolInputsIntoWorkerToolRoot(key, workerFiles); + + assert Files.exists(workRoot); + List<Path> expectedToolInputs = new ArrayList<>(); + for (TreeFile file : fileInputs) { + if (file.isTool) { + expectedToolInputs.add(toolsRoot.resolve(file.path)); + } + } + WorkerTestUtils.assertFilesExistExactly(workRoot, expectedToolInputs); + + List<Path> expectedOpRootFiles = new ArrayList<>(); + + // Check that we move specified output files (assuming they exist) + for (String pathStr : ctx.outputFiles) { + Path file = workRoot.resolve(pathStr); + Files.createDirectories(file.getParent()); + Files.createFile(file); + expectedOpRootFiles.add(opRoot.resolve(pathStr)); + } + + pc.moveOutputsToOperationRoot(ctx,
workRoot); + + WorkerTestUtils.assertFilesExistExactly(opRoot, expectedOpRootFiles); + } +} diff --git a/src/test/java/build/buildfarm/worker/resources/LocalResourceSetUtilsTest.java b/src/test/java/build/buildfarm/worker/resources/LocalResourceSetUtilsTest.java new file mode 100644 index 0000000000..94387e8834 --- /dev/null +++ b/src/test/java/build/buildfarm/worker/resources/LocalResourceSetUtilsTest.java @@ -0,0 +1,51 @@ +// Copyright 2023 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package build.buildfarm.worker.resources; + +import build.bazel.remote.execution.v2.Platform; +import build.buildfarm.v1test.QueueEntry; +import java.util.concurrent.Semaphore; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * @class LocalResourceSetUtilsTest + * @brief Tests how local resources are claimed and released. + * @details Shows behaviour of local resource claims and releases. + */ +@RunWith(JUnit4.class) +public class LocalResourceSetUtilsTest { + // Function under test: releaseClaims + // Reason for testing: Show it's okay to return claims that were never taken. + // Failure explanation: releasing claims that were never taken failed. + @Test + public void releaseClaimsThatWereNeverTaken() throws Exception { + // ARRANGE + LocalResourceSet resourceSet = new LocalResourceSet(); + resourceSet.resources.put("FOO", new Semaphore(1)); + + QueueEntry entry = + QueueEntry.newBuilder() + .setPlatform( + Platform.newBuilder() + .addProperties( + Platform.Property.newBuilder().setName("resource:FOO").setValue("10"))) + .build(); + + // ACT + LocalResourceSetUtils.releaseClaims(entry.getPlatform(), resourceSet); + } +} diff --git a/src/test/java/build/buildfarm/worker/resources/ResourceDeciderTest.java b/src/test/java/build/buildfarm/worker/resources/ResourceDeciderTest.java index 8fa711ab5a..2f1d740b98 100644 --- a/src/test/java/build/buildfarm/worker/resources/ResourceDeciderTest.java +++ b/src/test/java/build/buildfarm/worker/resources/ResourceDeciderTest.java @@ -53,11 +53,11 @@ public void decideResourceLimitationsTestCoreSetting() throws Exception { ResourceDecider.decideResourceLimitations( command, "worker", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, new SandboxSettings()); // ASSERT @@ -88,10 +88,10 @@ public void decideResourceLimitationsTestCoreSettingDefaultedOnNontest() throws command, "worker", defaultMaxCores, - /* onlyMulticoreTests=*/ true, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* onlyMulticoreTests= */ true, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */
false, new SandboxSettings()); // ASSERT @@ -120,11 +120,11 @@ public void decideResourceLimitationsEnsureClaimsOne() throws Exception { ResourceDecider.decideResourceLimitations( command, "worker", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, new SandboxSettings()); // ASSERT @@ -152,11 +152,11 @@ public void decideResourceLimitationsEnsureLimitGlobalSet() throws Exception { ResourceDecider.decideResourceLimitations( command, "worker", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ true, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ true, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, new SandboxSettings()); // ASSERT @@ -184,11 +184,11 @@ public void decideResourceLimitationsEnsureNoLimitNoGlobalSet() throws Exception ResourceDecider.decideResourceLimitations( command, "worker", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, new SandboxSettings()); // ASSERT @@ -216,11 +216,11 @@ public void decideResourceLimitationsEnsureClaimsAreMin() throws Exception { ResourceDecider.decideResourceLimitations( command, "worker", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, new SandboxSettings()); // ASSERT @@ -247,11 +247,11 @@ public void decideResourceLimitationsTestMemSetting() throws Exception { ResourceDecider.decideResourceLimitations( command, "worker", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, new SandboxSettings()); // ASSERT @@ -273,11 +273,11 @@ public void decideResourceLimitationsTestDefaultEnvironmentParse() throws Except ResourceDecider.decideResourceLimitations( command, "worker", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, new SandboxSettings()); // ASSERT @@ -303,11 +303,11 @@ public void decideResourceLimitationsTestEmptyEnvironmentParse() throws Exceptio ResourceDecider.decideResourceLimitations( command, "worker", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ false, - /* 
executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, new SandboxSettings()); // ASSERT @@ -336,11 +336,11 @@ public void decideResourceLimitationsTestSingleEnvironmentParse() throws Excepti ResourceDecider.decideResourceLimitations( command, "worker", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, new SandboxSettings()); // ASSERT @@ -371,11 +371,11 @@ public void decideResourceLimitationsTestDoubleEnvironmentParse() throws Excepti ResourceDecider.decideResourceLimitations( command, "worker", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, new SandboxSettings()); // ASSERT @@ -408,11 +408,11 @@ public void decideResourceLimitationsTestMalformedEnvironmentParse() throws Exce ResourceDecider.decideResourceLimitations( command, "worker", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, new SandboxSettings()); // ASSERT @@ -437,7 +437,8 @@ public void decideResourceLimitationsTestEnvironmentMustacheResolution() throws Platform.Property.newBuilder() .setName("env-vars") .setValue( - "{\"foo\": \"{{limits.cpu.min}}\", \"bar\": \"{{limits.cpu.max}}\"}"))) + "{\"foo\": \"{{limits.cpu.min}}\", \"bar\":" + + " \"{{limits.cpu.max}}\"}"))) .build(); // ACT @@ -445,11 +446,11 @@ public void decideResourceLimitationsTestEnvironmentMustacheResolution() throws ResourceDecider.decideResourceLimitations( command, "worker", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, new SandboxSettings()); // ASSERT @@ -480,11 +481,11 @@ public void decideResourceLimitationsTestIndividualEnvironmentVarParse() throws ResourceDecider.decideResourceLimitations( command, "worker", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, new SandboxSettings()); // ASSERT @@ -516,11 +517,11 @@ public void decideResourceLimitationsTestTwoIndividualEnvironmentVarParse() thro ResourceDecider.decideResourceLimitations( command, "worker", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ 
false, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, new SandboxSettings()); // ASSERT @@ -552,11 +553,11 @@ public void decideResourceLimitationsTestEmptyEnvironmentVarParse() throws Excep ResourceDecider.decideResourceLimitations( command, "worker", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, new SandboxSettings()); // ASSERT @@ -589,11 +590,11 @@ public void decideResourceLimitationsTestDebugBeforeParse() throws Exception { ResourceDecider.decideResourceLimitations( command, "worker", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, new SandboxSettings()); // ASSERT @@ -624,11 +625,11 @@ public void decideResourceLimitationsTestDebugAfterParse() throws Exception { ResourceDecider.decideResourceLimitations( command, "worker", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, new SandboxSettings()); // ASSERT @@ -659,11 +660,11 @@ public void decideResourceLimitationsTestInvalidDebugParse() throws Exception { ResourceDecider.decideResourceLimitations( command, "worker", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, new SandboxSettings()); // ASSERT @@ -683,11 +684,11 @@ public void decideResourceLimitationsTestWorkerName() throws Exception { ResourceDecider.decideResourceLimitations( command, "foo", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, new SandboxSettings()); // ASSERT @@ -707,11 +708,11 @@ public void decideResourceLimitationsSanboxOffDefault() throws Exception { ResourceDecider.decideResourceLimitations( command, "foo", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, new SandboxSettings()); 
// ASSERT @@ -726,18 +727,18 @@ public void decideResourceLimitationsAlwaysUseSandbox() throws Exception { // ARRANGE Command command = Command.newBuilder().build(); SandboxSettings sandboxSettings = new SandboxSettings(); - sandboxSettings.alwaysUse = true; + sandboxSettings.alwaysUseSandbox = true; // ACT ResourceLimits limits = ResourceDecider.decideResourceLimitations( command, "foo", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, sandboxSettings); // ASSERT @@ -767,11 +768,11 @@ public void decideResourceLimitationsSandboxChosenViaBlockNetwork() throws Excep ResourceDecider.decideResourceLimitations( command, "foo", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, sandboxSettings); // ASSERT @@ -801,11 +802,11 @@ public void decideResourceLimitationsSandboxNotChosenViaBlockNetwork() throws Ex ResourceDecider.decideResourceLimitations( command, "foo", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, sandboxSettings); // ASSERT @@ -835,11 +836,11 @@ public void decideResourceLimitationsSandboxChosenViaTmpFs() throws Exception { ResourceDecider.decideResourceLimitations( command, "foo", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, sandboxSettings); // ASSERT @@ -869,11 +870,11 @@ public void decideResourceLimitationsSandboxNotChosenViaTmpFs() throws Exception ResourceDecider.decideResourceLimitations( command, "foo", - /* defaultMaxCores=*/ 0, - /* onlyMulticoreTests=*/ false, - /* limitGlobalExecution=*/ false, - /* executeStageWidth=*/ 100, - /* allowBringYourOwnContainer=*/ false, + /* defaultMaxCores= */ 0, + /* onlyMulticoreTests= */ false, + /* limitGlobalExecution= */ false, + /* executeStageWidth= */ 100, + /* allowBringYourOwnContainer= */ false, sandboxSettings); // ASSERT diff --git a/src/test/java/build/buildfarm/worker/shard/BUILD b/src/test/java/build/buildfarm/worker/shard/BUILD index b8a7b31ec6..c254e41fa4 100644 --- a/src/test/java/build/buildfarm/worker/shard/BUILD +++ b/src/test/java/build/buildfarm/worker/shard/BUILD @@ -11,6 +11,7 @@ java_test( "//src/main/java/build/buildfarm/common/config", "//src/main/java/build/buildfarm/instance", "//src/main/java/build/buildfarm/worker", + "//src/main/java/build/buildfarm/worker/resources", "//src/main/java/build/buildfarm/worker/shard", "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", "//src/test/java/build/buildfarm:test_runner", diff --git 
a/src/test/java/build/buildfarm/worker/shard/CFCExecFileSystemTest.java b/src/test/java/build/buildfarm/worker/shard/CFCExecFileSystemTest.java new file mode 100644 index 0000000000..8d7c0c9f64 --- /dev/null +++ b/src/test/java/build/buildfarm/worker/shard/CFCExecFileSystemTest.java @@ -0,0 +1,43 @@ +// Copyright 2023 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package build.buildfarm.worker.shard; + +import static com.google.common.truth.Truth.assertThat; + +import build.bazel.remote.execution.v2.Command; +import build.buildfarm.worker.OutputDirectory; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class CFCExecFileSystemTest { + @Test + public void outputDirectoryWorkingDirectoryRelative() { + Command command = + Command.newBuilder() + .setWorkingDirectory("foo/bar") + .addOutputFiles("baz/quux") + .addOutputDirectories("nope") + .build(); + + // createOutputDirectory throws unless each output path is relative to the working directory; + // the assertions below verify the contents of the resulting tree. + OutputDirectory workingOutputDirectory = + CFCExecFileSystem.createOutputDirectory(command).getChild("foo").getChild("bar"); + assertThat(workingOutputDirectory.getChild("baz").isLeaf()).isTrue(); + assertThat(workingOutputDirectory.getChild("nope").isLeaf()).isFalse(); + } +} diff --git a/src/test/java/build/buildfarm/worker/shard/EmptyInputStreamFactoryTest.java b/src/test/java/build/buildfarm/worker/shard/EmptyInputStreamFactoryTest.java index ee995f7c89..5d622fb3ba 100644 --- a/src/test/java/build/buildfarm/worker/shard/EmptyInputStreamFactoryTest.java +++ b/src/test/java/build/buildfarm/worker/shard/EmptyInputStreamFactoryTest.java @@ -39,7 +39,7 @@ public void emptyDigestIsNotDelegated() throws IOException, InterruptedException }); InputStream in = emptyFactory.newInput( - Compressor.Value.IDENTITY, Digest.getDefaultInstance(), /* offset=*/ 0); + Compressor.Value.IDENTITY, Digest.getDefaultInstance(), /* offset= */ 0); assertThat(in.read()).isEqualTo(-1); } @@ -56,7 +56,7 @@ public void nonEmptyDigestIsDelegated() throws IOException, InterruptedException throw new IOException("invalid"); }); InputStream in = - emptyFactory.newInput(Compressor.Value.IDENTITY, contentDigest, /* offset=*/ 0); + emptyFactory.newInput(Compressor.Value.IDENTITY, contentDigest, /* offset= */ 0); assertThat(ByteString.readFrom(in)).isEqualTo(content); } } diff --git a/src/test/java/build/buildfarm/worker/shard/FailoverInputStreamFactoryTest.java b/src/test/java/build/buildfarm/worker/shard/FailoverInputStreamFactoryTest.java index 78055cafc8..9b14951144 100644 --- a/src/test/java/build/buildfarm/worker/shard/FailoverInputStreamFactoryTest.java +++ b/src/test/java/build/buildfarm/worker/shard/FailoverInputStreamFactoryTest.java @@ -37,17 +37,17 @@ public void DigestInPrimaryIsNotDelegated() throws IOException, InterruptedExcep Digest contentDigest = DIGEST_UTIL.compute(content); FailoverInputStreamFactory 
failoverFactory = new FailoverInputStreamFactory( - /* primary=*/ (compressor, digest, offset) -> { + /* primary= */ (compressor, digest, offset) -> { if (digest.equals(contentDigest)) { return content.newInput(); } throw new NoSuchFileException(DigestUtil.toString(digest)); }, - /* failover=*/ (compressor, digest, offset) -> { + /* failover= */ (compressor, digest, offset) -> { throw new IOException("invalid"); }); InputStream in = - failoverFactory.newInput(Compressor.Value.IDENTITY, contentDigest, /* offset=*/ 0); + failoverFactory.newInput(Compressor.Value.IDENTITY, contentDigest, /* offset= */ 0); assertThat(ByteString.readFrom(in)).isEqualTo(content); } @@ -57,17 +57,17 @@ public void missingDigestIsDelegated() throws IOException, InterruptedException Digest contentDigest = DIGEST_UTIL.compute(content); FailoverInputStreamFactory failoverFactory = new FailoverInputStreamFactory( - /* primary=*/ (compressor, digest, offset) -> { + /* primary= */ (compressor, digest, offset) -> { throw new NoSuchFileException(DigestUtil.toString(digest)); }, - /* failover=*/ (compressor, digest, offset) -> { + /* failover= */ (compressor, digest, offset) -> { if (digest.equals(contentDigest)) { return content.newInput(); } throw new IOException("invalid"); }); InputStream in = - failoverFactory.newInput(Compressor.Value.IDENTITY, contentDigest, /* offset=*/ 0); + failoverFactory.newInput(Compressor.Value.IDENTITY, contentDigest, /* offset= */ 0); assertThat(ByteString.readFrom(in)).isEqualTo(content); } } diff --git a/src/test/java/build/buildfarm/worker/shard/ShardWorkerContextTest.java b/src/test/java/build/buildfarm/worker/shard/ShardWorkerContextTest.java index dd584c49af..e0a66db27c 100644 --- a/src/test/java/build/buildfarm/worker/shard/ShardWorkerContextTest.java +++ b/src/test/java/build/buildfarm/worker/shard/ShardWorkerContextTest.java @@ -15,6 +15,7 @@ package build.buildfarm.worker.shard; import static build.buildfarm.common.config.Server.INSTANCE_TYPE.SHARD; +import static com.google.common.truth.Truth.assertThat; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; @@ -22,9 +23,14 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import build.bazel.remote.execution.v2.ActionResult; +import build.bazel.remote.execution.v2.Command; +import build.bazel.remote.execution.v2.Digest; +import build.bazel.remote.execution.v2.OutputFile; import build.bazel.remote.execution.v2.Platform; import build.bazel.remote.execution.v2.Platform.Property; import build.buildfarm.backplane.Backplane; +import build.buildfarm.cas.ContentAddressableStorage; import build.buildfarm.common.DigestUtil; import build.buildfarm.common.DigestUtil.HashFunction; import build.buildfarm.common.InputStreamFactory; @@ -35,8 +41,13 @@ import build.buildfarm.instance.MatchListener; import build.buildfarm.v1test.QueueEntry; import build.buildfarm.worker.WorkerContext; +import build.buildfarm.worker.resources.LocalResourceSet; import com.google.common.collect.ImmutableList; +import com.google.common.collect.Iterables; +import com.google.common.jimfs.Jimfs; import com.google.protobuf.Duration; +import java.nio.file.Files; +import java.nio.file.Path; import java.util.ArrayList; import java.util.List; import org.junit.Before; @@ -79,32 +90,34 @@ public void setUp() throws Exception { } WorkerContext createTestContext() { - return createTestContext(/* policies=*/ ImmutableList.of()); + return createTestContext(/* policies= */ 
ImmutableList.of()); } WorkerContext createTestContext(Iterable<ExecutionPolicy> policies) { return new ShardWorkerContext( "test", - /* operationPollPeriod=*/ Duration.getDefaultInstance(), - /* operationPoller=*/ (queueEntry, stage, requeueAt) -> false, - /* inlineContentLimit=*/ - /* inputFetchStageWidth=*/ 0, - /* executeStageWidth=*/ 0, - /* inputFetchDeadline=*/ 60, backplane, execFileSystem, inputStreamFactory, policies, instance, - /* deadlineAfter=*/ - /* deadlineAfterUnits=*/ - /* defaultActionTimeout=*/ Duration.getDefaultInstance(), - /* maximumActionTimeout=*/ Duration.getDefaultInstance(), - /* defaultMaxCores=*/ 0, - /* limitGlobalExecution=*/ false, - /* onlyMulticoreTests=*/ false, - /* allowBringYourOwnContainer=*/ false, - /* errorOperationRemainingResources=*/ false, + /* operationPollPeriod= */ Duration.getDefaultInstance(), + /* operationPoller= */ (queueEntry, stage, requeueAt) -> false, + /* inlineContentLimit= */ + /* inputFetchStageWidth= */ 0, + /* executeStageWidth= */ 0, + /* inputFetchDeadline= */ 60, backplane, execFileSystem, inputStreamFactory, policies, instance, + /* deadlineAfter= */ + /* deadlineAfterUnits= */ + /* defaultActionTimeout= */ Duration.getDefaultInstance(), + /* maximumActionTimeout= */ Duration.getDefaultInstance(), + /* defaultMaxCores= */ 0, + /* limitGlobalExecution= */ false, + /* onlyMulticoreTests= */ false, + /* allowBringYourOwnContainer= */ false, + /* errorOperationRemainingResources= */ false, + /* errorOperationOutputSizeExceeded= */ false, + /* resourceSet= */ new LocalResourceSet(), writer); } @@ -162,4 +175,22 @@ public void dequeueMatchSettingsPlatformAcceptsValidQueueEntry() throws Exceptio context.match(listener); verify(listener, times(1)).onEntry(queueEntry); } + + @Test + public void uploadOutputsWorkingDirectoryRelative() throws Exception { + WorkerContext context = createTestContext(); + Command command = + Command.newBuilder().setWorkingDirectory("foo/bar").addOutputFiles("baz/quux").build(); + ContentAddressableStorage storage = mock(ContentAddressableStorage.class); + when(execFileSystem.getStorage()).thenReturn(storage); + Path actionRoot = Iterables.getFirst(Jimfs.newFileSystem().getRootDirectories(), null); + Files.createDirectories(actionRoot.resolve("foo/bar/baz")); + Files.createFile(actionRoot.resolve("foo/bar/baz/quux")); + ActionResult.Builder resultBuilder = ActionResult.newBuilder(); + context.uploadOutputs(Digest.getDefaultInstance(), resultBuilder, actionRoot, command); + + ActionResult result = resultBuilder.build(); + OutputFile outputFile = Iterables.getOnlyElement(result.getOutputFilesList()); + assertThat(outputFile.getPath()).isEqualTo("baz/quux"); + } } diff --git a/src/test/java/build/buildfarm/worker/shard/ShardWorkerInstanceTest.java b/src/test/java/build/buildfarm/worker/shard/WorkerInstanceTest.java similarity index 84% rename from src/test/java/build/buildfarm/worker/shard/ShardWorkerInstanceTest.java rename to src/test/java/build/buildfarm/worker/shard/WorkerInstanceTest.java index 7a4e40be26..3c07ec31d4 100644 --- a/src/test/java/build/buildfarm/worker/shard/ShardWorkerInstanceTest.java +++ b/src/test/java/build/buildfarm/worker/shard/WorkerInstanceTest.java @@ -50,19 +50,19 @@ import org.mockito.MockitoAnnotations; @RunWith(JUnit4.class) -public class ShardWorkerInstanceTest { +public class WorkerInstanceTest { private final DigestUtil DIGEST_UTIL = new DigestUtil(HashFunction.SHA256); @Mock private Backplane backplane; @Mock private ContentAddressableStorage storage; - private ShardWorkerInstance instance; + private WorkerInstance instance; @Before public void 
setUp() throws Exception { MockitoAnnotations.initMocks(this); - instance = new ShardWorkerInstance("test", DIGEST_UTIL, backplane, storage); + instance = new WorkerInstance("test", DIGEST_UTIL, backplane, storage); } @SuppressWarnings("unchecked") @@ -116,7 +116,7 @@ public void putActionResultDelegatesToBackplane() throws IOException { public void listOperationsIsUnsupported() { ImmutableList.Builder<Operation> operations = new ImmutableList.Builder<>(); instance.listOperations( - /* pageSize=*/ 0, /* pageToken=*/ "", /* filter=*/ "", /* operations=*/ operations); + /* pageSize= */ 0, /* pageToken= */ "", /* filter= */ "", /* operations= */ operations); } @Test(expected = UnsupportedOperationException.class) @@ -127,49 +127,49 @@ public void readResourceNameIsUnsupported() { @Test(expected = UnsupportedOperationException.class) public void getTreeIsUnsupported() { instance.getTree( - /* rootDigest=*/ Digest.getDefaultInstance(), - /* pageSize=*/ 0, - /* pageToken=*/ "", - /* tree=*/ Tree.newBuilder()); + /* rootDigest= */ Digest.getDefaultInstance(), + /* pageSize= */ 0, + /* pageToken= */ "", + /* tree= */ Tree.newBuilder()); } @Test(expected = UnsupportedOperationException.class) public void getOperationStreamWriteIsUnsupported() { - instance.getOperationStreamWrite(/* name=*/ null); + instance.getOperationStreamWrite(/* name= */ null); } @Test(expected = UnsupportedOperationException.class) public void newOperationStreamInputIsUnsupported() { instance.newOperationStreamInput( - /* name=*/ null, - /* offset=*/ 0, - /* deadlineAfter=*/ - /* deadlineAfterUnits=*/ RequestMetadata.getDefaultInstance()); + /* name= */ null, + /* offset= */ 0, + /* deadlineAfter= */ + /* deadlineAfterUnits= */ RequestMetadata.getDefaultInstance()); } @Test(expected = UnsupportedOperationException.class) public void executeIsUnsupported() { instance.execute( - /* actionDigest=*/ null, - /* skipCacheLookup=*/ false, - /* executionPolicy=*/ null, - /* resultsCachePolicy=*/ null, - /* requestMetadata=*/ null, - /* watcher=*/ null); + /* actionDigest= */ null, + /* skipCacheLookup= */ false, + /* executionPolicy= */ null, + /* resultsCachePolicy= */ null, + /* requestMetadata= */ null, + /* watcher= */ null); } @Test(expected = UnsupportedOperationException.class) public void matchIsUnsupported() throws InterruptedException { - instance.match(/* platform=*/ null, /* listener=*/ null); + instance.match(/* platform= */ null, /* listener= */ null); } @Test(expected = UnsupportedOperationException.class) public void cancelOperationIsUnsupported() throws InterruptedException { - instance.cancelOperation(/* name=*/ null); + instance.cancelOperation(/* name= */ null); } @Test(expected = UnsupportedOperationException.class) public void deleteOperation() throws InterruptedException { - instance.deleteOperation(/* name=*/ null); + instance.deleteOperation(/* name= */ null); } } diff --git a/src/test/java/build/buildfarm/worker/util/BUILD b/src/test/java/build/buildfarm/worker/util/BUILD new file mode 100644 index 0000000000..dbf262c781 --- /dev/null +++ b/src/test/java/build/buildfarm/worker/util/BUILD @@ -0,0 +1,61 @@ +java_library( + name = "worker_test_utils", + srcs = ["WorkerTestUtils.java"], + visibility = ["//src/test/java:__subpackages__"], + deps = [ + "//persistentworkers/src/main/protobuf:worker_protocol_java_proto", + "//src/main/java/build/buildfarm/cas", + "//src/main/java/build/buildfarm/common", + "//src/main/java/build/buildfarm/common/config", + "//src/main/java/build/buildfarm/worker/util", 
"//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", + "//src/test/java/build/buildfarm:test_runner", + "@com_google_googleapis//google/rpc:rpc_java_proto", + "@maven//:com_github_jnr_jnr_constants", + "@maven//:com_github_jnr_jnr_ffi", + "@maven//:com_github_serceman_jnr_fuse", + "@maven//:com_google_guava_guava", + "@maven//:com_google_jimfs_jimfs", + "@maven//:com_google_protobuf_protobuf_java", + "@maven//:com_google_truth_truth", + "@maven//:io_grpc_grpc_api", + "@maven//:io_grpc_grpc_context", + "@maven//:io_grpc_grpc_core", + "@maven//:io_grpc_grpc_protobuf", + "@maven//:org_mockito_mockito_core", + "@maven//:org_projectlombok_lombok", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", + ], +) + +java_test( + name = "tests", + size = "small", + srcs = glob(["*Test.java"]), + test_class = "build.buildfarm.AllTests", + deps = [ + ":worker_test_utils", + "//persistentworkers/src/main/protobuf:worker_protocol_java_proto", + "//src/main/java/build/buildfarm/cas", + "//src/main/java/build/buildfarm/common", + "//src/main/java/build/buildfarm/common/config", + "//src/main/java/build/buildfarm/worker/util", + "//src/main/protobuf:build_buildfarm_v1test_buildfarm_java_proto", + "//src/test/java/build/buildfarm:test_runner", + "@com_google_googleapis//google/rpc:rpc_java_proto", + "@maven//:com_github_jnr_jnr_constants", + "@maven//:com_github_jnr_jnr_ffi", + "@maven//:com_github_serceman_jnr_fuse", + "@maven//:com_google_guava_guava", + "@maven//:com_google_jimfs_jimfs", + "@maven//:com_google_protobuf_protobuf_java", + "@maven//:com_google_truth_truth", + "@maven//:io_grpc_grpc_api", + "@maven//:io_grpc_grpc_context", + "@maven//:io_grpc_grpc_core", + "@maven//:io_grpc_grpc_protobuf", + "@maven//:org_mockito_mockito_core", + "@maven//:org_projectlombok_lombok", + "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto", + ], +) diff --git a/src/test/java/build/buildfarm/worker/util/InputsIndexerTest.java b/src/test/java/build/buildfarm/worker/util/InputsIndexerTest.java new file mode 100644 index 0000000000..954eb61e4e --- /dev/null +++ b/src/test/java/build/buildfarm/worker/util/InputsIndexerTest.java @@ -0,0 +1,184 @@ +// Copyright 2023 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package build.buildfarm.worker.util; + +import static build.buildfarm.worker.util.InputsIndexer.BAZEL_TOOL_INPUT_MARKER; +import static com.google.common.truth.Truth.assertThat; + +import build.bazel.remote.execution.v2.Digest; +import build.bazel.remote.execution.v2.Directory; +import build.bazel.remote.execution.v2.DirectoryNode; +import build.bazel.remote.execution.v2.FileNode; +import build.bazel.remote.execution.v2.NodeProperties; +import build.bazel.remote.execution.v2.NodeProperty; +import build.buildfarm.common.DigestUtil; +import build.buildfarm.v1test.Tree; +import com.google.common.collect.ImmutableMap; +import com.google.devtools.build.lib.worker.WorkerProtocol.Input; +import com.google.protobuf.ByteString; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.stream.Collectors; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +// TODO: use WorkerTestUtils.makeTree +@RunWith(JUnit4.class) +public class InputsIndexerTest { + private final DigestUtil DIGEST_UTIL = new DigestUtil(DigestUtil.HashFunction.SHA256); + + @Test + public void basicEmptyTree() { + Tree emptyTree = Tree.newBuilder().build(); + InputsIndexer indexer = new InputsIndexer(emptyTree, Paths.get(".")); + assertThat(indexer.tree).isEqualTo(emptyTree); + } + + @Test + public void canGetRootDir() { + Tree.Builder treeBuilder = Tree.newBuilder(); + + Directory rootDir = Directory.getDefaultInstance(); + Digest rootDirDigest = addDirToTree(treeBuilder, "my_root_dir", rootDir); + treeBuilder.setRootDigest(rootDirDigest); + + Path arbitraryOpRoot = Paths.get("."); + + InputsIndexer indexer = new InputsIndexer(treeBuilder.build(), arbitraryOpRoot); + assertThat(indexer.proxyDirs.get(rootDirDigest)).isEqualTo(rootDir); + assertThat(indexer.getAllInputs().size()).isEqualTo(0); + } + + @Test + public void rootDirWithFiles() { + Tree.Builder treeBuilder = Tree.newBuilder(); + + FileNode myfile = + makeFileNode("my_file", "my file contents", NodeProperties.getDefaultInstance()); + Directory rootDir = Directory.newBuilder().addFiles(myfile).build(); + Digest rootDirDigest = addDirToTree(treeBuilder, "my_root_dir", rootDir); + treeBuilder.setRootDigest(rootDirDigest); + + Path arbitraryOpRoot = Paths.get("asdf"); + InputsIndexer indexer = new InputsIndexer(treeBuilder.build(), arbitraryOpRoot); + assertThat(indexer.proxyDirs.get(rootDirDigest)).isEqualTo(rootDir); + + Input myfileInput = makeInput(arbitraryOpRoot, myfile); + + ImmutableMap<Path, Input> expectedInputs = + ImmutableMap.of(Paths.get(myfileInput.getPath()), myfileInput); + + assertThat(indexer.getAllInputs()).isEqualTo(expectedInputs); + } + + @Test + public void canRecurseAndDistinguishToolInputs() { + Tree.Builder treeBuilder = Tree.newBuilder(); + + FileNode myfile = + makeFileNode("my_file", "my file contents", NodeProperties.getDefaultInstance()); + FileNode subdirfile = + makeFileNode("subdir_file", "my subdir file contents", NodeProperties.getDefaultInstance()); + FileNode toolfile = + makeFileNode( + "tool_file", + "my tool file contents", + makeNodeProperties(ImmutableMap.of(BAZEL_TOOL_INPUT_MARKER, "value doesn't matter"))); + + Directory subDir = Directory.newBuilder().addFiles(subdirfile).build(); + String subDirName = "my_sub_dir"; + Digest subDirDigest = addDirToTree(treeBuilder, subDirName, subDir); + + Directory rootDir = + Directory.newBuilder() + .addFiles(myfile) + .addFiles(toolfile) + .addDirectories(makeDirNode(subDirName, subDirDigest)) + .build(); + + Digest rootDirDigest = addDirToTree(treeBuilder, "my_root_dir", rootDir); + treeBuilder.setRootDigest(rootDirDigest); + + Path arbitraryOpRoot = Paths.get("asdf"); + + InputsIndexer indexer = new InputsIndexer(treeBuilder.build(), arbitraryOpRoot); + assertThat(indexer.proxyDirs.get(rootDirDigest)).isEqualTo(rootDir); + assertThat(indexer.proxyDirs.size()).isEqualTo(2); + + Input myfileInput = makeInput(arbitraryOpRoot, myfile); + Input subdirfileInput = makeInput(arbitraryOpRoot.resolve(subDirName), subdirfile); + Input toolfileInput = makeInput(arbitraryOpRoot, toolfile); + + ImmutableMap<Path, Input> nonToolInputs = + ImmutableMap.of( + Paths.get(myfileInput.getPath()), + myfileInput, + Paths.get(subdirfileInput.getPath()), + subdirfileInput); + ImmutableMap<Path, Input> toolInputs = + ImmutableMap.of(Paths.get(toolfileInput.getPath()), toolfileInput); + ImmutableMap<Path, Input> allInputs = + ImmutableMap.<Path, Input>builder().putAll(nonToolInputs).putAll(toolInputs).build(); + + assertThat(indexer.getAllInputs()).isEqualTo(allInputs); + assertThat(indexer.getAllInputs().size()).isEqualTo(3); + assertThat(indexer.getToolInputs()).isEqualTo(toolInputs); + } + + Digest addDirToTree(Tree.Builder treeBuilder, String dirname, Directory dir) { + ByteString dirnameBytes = ByteString.copyFromUtf8(dirname); + Digest digest = DIGEST_UTIL.compute(dirnameBytes); + String hash = digest.getHash(); + treeBuilder.putDirectories(hash, dir); + return digest; + } + + FileNode makeFileNode(String filename, String content, NodeProperties nodeProperties) { + return FileNode.newBuilder() + .setName(filename) + .setDigest(DIGEST_UTIL.compute(ByteString.copyFromUtf8(content))) + .setIsExecutable(false) + .setNodeProperties(nodeProperties) + .build(); + } + + DirectoryNode makeDirNode(String dirname, Digest dirDigest) { + // The real directory digest isn't needed here; any stable digest works for these tests. + return DirectoryNode.newBuilder().setName(dirname).setDigest(dirDigest).build(); + } + + NodeProperties makeNodeProperties(ImmutableMap<String, String> props) { + return NodeProperties.newBuilder() + .addAllProperties( + props.entrySet().stream() + .map( + kv -> + NodeProperty.newBuilder() + .setName(kv.getKey()) + .setValue(kv.getValue()) + .build()) + .collect(Collectors.toList())) + .build(); + } + + Input makeInput(Path fileDir, FileNode file) { + Path fileNodePath = fileDir.resolve(file.getName()); + return Input.newBuilder() + .setPath(fileNodePath.toString()) + .setDigest(file.getDigest().getHashBytes()) + .build(); + } +} diff --git a/src/test/java/build/buildfarm/worker/util/WorkerTestUtils.java b/src/test/java/build/buildfarm/worker/util/WorkerTestUtils.java new file mode 100644 index 0000000000..dbaeca5c9f --- /dev/null +++ b/src/test/java/build/buildfarm/worker/util/WorkerTestUtils.java @@ -0,0 +1,226 @@ +// Copyright 2023 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package build.buildfarm.worker.util; + +import static build.buildfarm.worker.util.InputsIndexer.BAZEL_TOOL_INPUT_MARKER; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; + +import build.bazel.remote.execution.v2.Command; +import build.bazel.remote.execution.v2.Digest; +import build.bazel.remote.execution.v2.Directory; +import build.bazel.remote.execution.v2.DirectoryNode; +import build.bazel.remote.execution.v2.FileNode; +import build.bazel.remote.execution.v2.NodeProperties; +import build.bazel.remote.execution.v2.NodeProperty; +import build.buildfarm.common.DigestUtil; +import build.buildfarm.v1test.Tree; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.devtools.build.lib.worker.WorkerProtocol.Input; +import com.google.protobuf.ByteString; +import java.io.IOException; +import java.nio.file.FileVisitResult; +import java.nio.file.FileVisitor; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +public class WorkerTestUtils { + public static final DigestUtil DIGEST_UTIL = new DigestUtil(DigestUtil.HashFunction.SHA256); + + public static FileNode makeFileNode( + String filename, String content, NodeProperties nodeProperties) { + return FileNode.newBuilder() + .setName(filename) + .setDigest(DIGEST_UTIL.compute(ByteString.copyFromUtf8(content))) + .setIsExecutable(false) + .setNodeProperties(nodeProperties) + .build(); + } + + public static DirectoryNode makeDirNode(String dirname, Digest dirDigest) { + // The real directory digest isn't needed here; any stable digest works for these tests. + return DirectoryNode.newBuilder().setName(dirname).setDigest(dirDigest).build(); + } + + public static Digest addDirToTree(Tree.Builder treeBuilder, String dirname, Directory dir) { + ByteString dirnameBytes = ByteString.copyFromUtf8(dirname); + Digest digest = DIGEST_UTIL.compute(dirnameBytes); + String hash = digest.getHash(); + treeBuilder.putDirectories(hash, dir); + return digest; + } + + public static NodeProperties makeNodeProperties(ImmutableMap<String, String> props) { + return NodeProperties.newBuilder() + .addAllProperties( + props.entrySet().stream() + .map( + kv -> + NodeProperty.newBuilder() + .setName(kv.getKey()) + .setValue(kv.getValue()) + .build()) + .collect(Collectors.toList())) + .build(); + } + + public static Input makeInput(Path fileDir, FileNode file) { + Path fileNodePath = fileDir.resolve(file.getName()); + return Input.newBuilder() + .setPath(fileNodePath.toString()) + .setDigest(file.getDigest().getHashBytes()) + .build(); + } + + public static Command makeCommand() { + ImmutableList<String> outputFiles = ImmutableList.of("output_file", "out_subdir/out_subfile"); + ImmutableList<String> outputDirs = ImmutableList.of("out_subdir"); + ImmutableList<String> outputPaths = + ImmutableList.<String>builder().addAll(outputFiles).addAll(outputDirs).build(); + + return Command.newBuilder() + .addAllOutputFiles(outputFiles) + .addAllOutputDirectories(outputDirs) + .addAllOutputPaths(outputPaths) + .build(); + } + + public static class TreeFile { + public final String path; + public final boolean isTool; + + // null means directory + public final String content; + + public TreeFile(String path) { + this(path, "", false); + } + + public TreeFile(String path, String content) { + this(path, content, false); + } + + public TreeFile(String path, String content, boolean isTool) { + this.path = path; + this.isTool = isTool; + this.content = content; + } + + public boolean isDir() { + return this.content == null; + } + + public String name() { + return Paths.get(this.path).getFileName().toString(); + } + } + + public static Tree makeTree(String rootDirPath, List<TreeFile> files) { + Tree.Builder treeBuilder = Tree.newBuilder(); + if (files.isEmpty()) { + return treeBuilder.build(); + } + Directory.Builder rootDirBuilder = Directory.newBuilder(); + + Map<String, Directory.Builder> dirBuilders = new HashMap<>(); + + for (TreeFile file : files) { + if (file.isDir()) { + dirBuilders.computeIfAbsent(file.path, (filePath) -> Directory.newBuilder()); + } else { + NodeProperties props = NodeProperties.getDefaultInstance(); + if (file.isTool) { + props = makeNodeProperties(ImmutableMap.of(BAZEL_TOOL_INPUT_MARKER, "")); + } + FileNode fileNode = makeFileNode(file.name(), file.content, props); + Path parentDirPath = Paths.get(file.path).getParent(); + if (parentDirPath != null) { + String parentDirPathStr = parentDirPath.normalize().toString(); + Directory.Builder parentDirBuilder = + dirBuilders.computeIfAbsent(parentDirPathStr, (filePath) -> Directory.newBuilder()); + parentDirBuilder.addFiles(fileNode); + } else { + rootDirBuilder.addFiles(fileNode); + } + } + } + + for (Map.Entry<String, Directory.Builder> entry : dirBuilders.entrySet()) { + String subDirName = entry.getKey(); + Directory subDir = entry.getValue().build(); + Digest subDirDigest = addDirToTree(treeBuilder, subDirName, subDir); + rootDirBuilder.addDirectories(makeDirNode(subDirName, subDirDigest)); + } + + Digest rootDirDigest = addDirToTree(treeBuilder, rootDirPath, rootDirBuilder.build()); + treeBuilder.setRootDigest(rootDirDigest); + + return treeBuilder.build(); + } + + public static List<Path> listFilesRec(Path root) throws IOException { + List<Path> filesFound = new ArrayList<>(); + + Files.walkFileTree( + root, + new FileVisitor<Path>() { + @Override + public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) + throws IOException { + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) + throws IOException { + filesFound.add(file); + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException { + throw new IOException("visitFileFailed"); + } + + @Override + public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException { + filesFound.add(dir); + return FileVisitResult.CONTINUE; + } + }); + + return filesFound; + } + + // Check all expected files exist and that only they exist + public static void assertFilesExistExactly(Path root, List<Path> expectedFiles) + throws IOException { + List<Path> listedPaths = listFilesRec(root); + for (Path filePath : listedPaths) { + assertWithMessage("Path is not a prefix of any expected file: " + filePath) + .that(expectedFiles.stream().anyMatch(p -> p.startsWith(filePath))) + .isTrue(); + } + assertThat(listedPaths).containsAtLeastElementsIn(expectedFiles); + } +} diff --git a/src/test/many/.bazelversion b/src/test/many/.bazelversion index 7cbea073be..19b860c187 100644 --- a/src/test/many/.bazelversion +++ b/src/test/many/.bazelversion @@ -1 +1 @@ -5.2.0 \ No newline at end of file +6.4.0 diff --git a/src/test/many/BUILD b/src/test/many/BUILD index 0526104520..775051cb23 100644 --- a/src/test/many/BUILD +++ b/src/test/many/BUILD @@ -8,10 +8,10 @@ MANY_CC_BINARIES, and 
MANY_CC_LIBRARY_SOURCES. For instance: MANY_CC_BINARIES=20 MANY_CC_LIBRARIES=10 MANY_CC_LIBRARY_SOURCES=5 bazel build //:cc """ -load(":many-cc.bzl", "many_cc") load("@many-params//:cc-binaries.bzl", "cc_binaries") load("@many-params//:cc-libraries.bzl", "cc_libraries") load("@many-params//:cc-library-sources.bzl", "cc_library_sources") +load(":many-cc.bzl", "many_cc") many_cc( name = "cc", diff --git a/src/test/many/many-cc.bzl b/src/test/many/many-cc.bzl index 90c88113b4..03fb37b0cc 100644 --- a/src/test/many/many-cc.bzl +++ b/src/test/many/many-cc.bzl @@ -2,8 +2,8 @@ Provide a simple C++ build graph as large as desired. """ -load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library") load("@bazel_skylib//rules:write_file.bzl", _write_file = "write_file") +load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library") def write_file(name, out, lines): """ diff --git a/third_party/BUILD.blake3 b/third_party/BUILD.blake3 deleted file mode 100644 index 512b338d9d..0000000000 --- a/third_party/BUILD.blake3 +++ /dev/null @@ -1,92 +0,0 @@ -load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library") - -licenses(["notice"]) # BSD/MIT-like license - -exports_files(["LICENSE"]) - -filegroup( - name = "srcs", - srcs = glob(["**"]), - visibility = ["//third_party:__pkg__"], -) - -cc_library( - name = "blake3", - srcs = [ - "c/blake3.c", - "c/blake3_dispatch.c", - "c/blake3_portable.c", - ] + select({ - "@bazel_tools//src/conditions:linux_x86_64": [ - "c/blake3_avx2_x86-64_unix.S", - # Disable to appease bazel-ci which uses ubuntu-18 (EOL) and GCC 7 - # lacking the headers to compile AVX512. - # "c/blake3_avx512_x86-64_unix.S", - "c/blake3_sse2_x86-64_unix.S", - "c/blake3_sse41_x86-64_unix.S", - ], - "@bazel_tools//src/conditions:linux_aarch64": [ - "c/blake3_neon.c", - ], - "@bazel_tools//src/conditions:windows_x64": [ - "c/blake3_avx2_x86-64_windows_msvc.asm", - "c/blake3_avx512_x86-64_windows_msvc.asm", - "c/blake3_sse2_x86-64_windows_msvc.asm", - "c/blake3_sse41_x86-64_windows_msvc.asm", - ], - "@bazel_tools//src/conditions:windows_arm64": [ - "c/blake3_neon.c", - ], - "@bazel_tools//src/conditions:darwin_arm64": [ - "c/blake3_neon.c", - ], - "//conditions:default": [], - }), - hdrs = [ - "c/blake3.h", - "c/blake3_impl.h", - ], - copts = select({ - "@bazel_tools//src/conditions:linux_x86_64": [ - # Disable to appease bazel-ci which uses ubuntu-18 (EOL) and GCC 7 - # lacking the headers to compile AVX512. 
- "-DBLAKE3_NO_AVX512", - ], - "@bazel_tools//src/conditions:linux_aarch64": [ - "-DBLAKE3_USE_NEON=1", - ], - "@bazel_tools//src/conditions:windows_x64": [], - "@bazel_tools//src/conditions:windows_arm64": [ - "-DBLAKE3_USE_NEON=1", - ], - "@bazel_tools//src/conditions:darwin_arm64": [ - "-DBLAKE3_USE_NEON=1", - ], - "//conditions:default": [ - "-DBLAKE3_NO_AVX2", - "-DBLAKE3_NO_AVX512", - "-DBLAKE3_NO_NEON", - "-DBLAKE3_NO_SSE2", - "-DBLAKE3_NO_SSE41", - ], - }), - includes = ["."], - visibility = ["//visibility:public"], -) - -cc_binary( - name = "example", - srcs = [ - "c/example.c", - ], - copts = [ - "-w", - "-O3", - ], - includes = ["."], - visibility = ["//visibility:public"], - deps = [ - ":blake3", - ], -) - diff --git a/third_party/BUILD.googleapis b/third_party/BUILD.googleapis deleted file mode 100644 index e51fecb67b..0000000000 --- a/third_party/BUILD.googleapis +++ /dev/null @@ -1,201 +0,0 @@ -load("@rules_proto//proto:defs.bzl", "proto_library") - -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -exports_files(["LICENSE"]) - -load("@io_grpc_grpc_java//:java_grpc_library.bzl", "java_grpc_library") - -java_proto_library( - name = "google_devtools_build_v1_build_status_java_proto", - deps = [":google_devtools_build_v1_build_status_proto"], -) - -java_proto_library( - name = "google_devtools_build_v1_build_events_java_proto", - deps = [":google_devtools_build_v1_build_events_proto"], -) - -java_grpc_library( - name = "google_devtools_build_v1_publish_build_event_java_grpc", - srcs = [":google_devtools_build_v1_publish_build_event_proto"], - deps = [":google_devtools_build_v1_publish_build_event_java_proto"], -) - -java_proto_library( - name = "google_devtools_build_v1_publish_build_event_java_proto", - deps = [":google_devtools_build_v1_publish_build_event_proto"], -) - -java_proto_library( - name = "google_bytestream_bytestream_java_proto", - deps = [":google_bytestream_bytestream_proto"], -) - -java_proto_library( - name = "google_longrunning_operations_java_proto", - deps = [":google_longrunning_operations_proto"], -) - -java_proto_library( - name = "google_rpc_status_java_proto", - deps = [":google_rpc_status_proto"], -) - -java_proto_library( - name = "google_rpc_error_details_java_proto", - deps = [":google_rpc_error_details_proto"], -) - -java_proto_library( - name = "google_rpc_code_java_proto", - deps = [":google_rpc_code_proto"], -) - -java_proto_library( - name = "google_api_annotations_java_proto", - deps = [":google_api_annotations_proto"], -) - -java_proto_library( - name = "google_api_http_java_proto", - deps = [":google_api_http_proto"], -) - -java_proto_library( - name = "google_api_auth_java_proto", - deps = [":google_api_auth_proto"], -) - -java_grpc_library( - name = "google_bytestream_bytestream_java_grpc", - srcs = [":google_bytestream_bytestream_proto"], - deps = [":google_bytestream_bytestream_java_proto"], -) - -java_grpc_library( - name = "google_longrunning_operations_java_grpc", - srcs = [":google_longrunning_operations_proto"], - deps = [":google_longrunning_operations_java_proto"], -) - -proto_library( - name = "google_rpc_code_proto", - srcs = ["google/rpc/code.proto"], -) - -proto_library( - name = "google_rpc_error_details_proto", - srcs = ["google/rpc/error_details.proto"], - deps = [ - "@com_google_protobuf//:any_proto", - "@com_google_protobuf//:duration_proto", - ], -) - -proto_library( - name = "google_bytestream_bytestream_proto", - srcs = ["google/bytestream/bytestream.proto"], - deps = [ - 
":google_api_annotations_proto", - "@com_google_protobuf//:wrappers_proto", - ], -) - -proto_library( - name = "google_longrunning_operations_proto", - srcs = ["google/longrunning/operations.proto"], - deps = [ - ":google_api_annotations_proto", - ":google_api_http_proto", - ":google_rpc_status_proto", - ":google_api_client_proto", - "@com_google_protobuf//:any_proto", - "@com_google_protobuf//:empty_proto", - "@com_google_protobuf//:duration_proto", - "@com_google_protobuf//:descriptor_proto", - ], -) - -proto_library( - name = "google_devtools_build_v1_build_status_proto", - srcs = ["google/devtools/build/v1/build_status.proto"], - deps = [ - ":google_api_annotations_proto", - "@com_google_protobuf//:any_proto", - "@com_google_protobuf//:wrappers_proto", - ], -) - -proto_library( - name = "google_devtools_build_v1_build_events_proto", - srcs = ["google/devtools/build/v1/build_events.proto"], - deps = [ - ":google_api_annotations_proto", - ":google_devtools_build_v1_build_status_proto", - ":google_rpc_status_proto", - "@com_google_protobuf//:any_proto", - "@com_google_protobuf//:timestamp_proto", - "@com_google_protobuf//:wrappers_proto", - ], -) - -proto_library( - name = "google_devtools_build_v1_publish_build_event_proto", - srcs = ["google/devtools/build/v1/publish_build_event.proto"], - deps = [ - ":google_api_annotations_proto", - ":google_api_auth_proto", - ":google_devtools_build_v1_build_events_proto", - ":google_api_field_behavior_proto", - ":google_api_client_proto", - "@com_google_protobuf//:any_proto", - "@com_google_protobuf//:duration_proto", - "@com_google_protobuf//:empty_proto", - ], -) - -proto_library( - name = "google_api_annotations_proto", - srcs = ["google/api/annotations.proto"], - deps = [ - ":google_api_http_proto", - "@com_google_protobuf//:descriptor_proto", - ], -) - -proto_library( - name = "google_api_http_proto", - srcs = ["google/api/http.proto"], -) - -proto_library( - name = "google_api_client_proto", - srcs = ["google/api/client.proto"], - deps = [ - "@com_google_protobuf//:descriptor_proto", - ":google_api_field_behavior_proto", - ] -) - -proto_library( - name = "google_api_field_behavior_proto", - srcs = ["google/api/field_behavior.proto"], - deps = [ - "@com_google_protobuf//:descriptor_proto", - ] -) - -proto_library( - name = "google_rpc_status_proto", - srcs = ["google/rpc/status.proto"], - deps = ["@com_google_protobuf//:any_proto"], -) - -proto_library( - name = "google_api_auth_proto", - srcs = ["google/api/auth.proto"], - deps = [":google_api_annotations_proto"], -) diff --git a/third_party/BUILD.remote_apis b/third_party/BUILD.remote_apis deleted file mode 100644 index 3277c127d0..0000000000 --- a/third_party/BUILD.remote_apis +++ /dev/null @@ -1,39 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -exports_files(["LICENSE"]) - -load("@io_grpc_grpc_java//:java_grpc_library.bzl", "java_grpc_library") - -java_proto_library( - name = "build_bazel_remote_asset_v1_remote_asset_java_proto", - deps = [ - "//build/bazel/remote/asset/v1:remote_asset_proto", - ], -) - -java_grpc_library( - name = "build_bazel_remote_asset_v1_remote_asset_java_grpc", - srcs = ["//build/bazel/remote/asset/v1:remote_asset_proto"], - deps = [":build_bazel_remote_asset_v1_remote_asset_java_proto"], -) - -java_proto_library( - name = "build_bazel_remote_execution_v2_remote_execution_java_proto", - deps = [ - "//build/bazel/remote/execution/v2:remote_execution_proto", - "@googleapis//:google_longrunning_operations_proto", - ], -) - 
-java_grpc_library( - name = "build_bazel_remote_execution_v2_remote_execution_java_grpc", - srcs = ["//build/bazel/remote/execution/v2:remote_execution_proto"], - deps = [":build_bazel_remote_execution_v2_remote_execution_java_proto"], -) - -java_proto_library( - name = "build_bazel_semver_java_proto", - deps = ["//build/bazel/semver:semver_proto"], -) diff --git a/third_party/docker_go_toolchain.patch b/third_party/docker_go_toolchain.patch new file mode 100644 index 0000000000..3b00ff333c --- /dev/null +++ b/third_party/docker_go_toolchain.patch @@ -0,0 +1,11 @@ +--- repositories/go_repositories.bzl.orig 2023-09-23 08:36:00.148468653 -0400 ++++ repositories/go_repositories.bzl 2023-09-23 08:33:22.502127476 -0400 +@@ -37,7 +37,7 @@ + go_repository_default_config (str, optional): A file used to determine the root of the workspace. + """ + go_rules_dependencies() +- go_register_toolchains() ++ go_register_toolchains("1.21.0") + gazelle_dependencies(go_repository_default_config = go_repository_default_config) + excludes = native.existing_rules().keys() + if "com_github_google_go_containerregistry" not in excludes: diff --git a/third_party/jedis/BUILD b/third_party/jedis/BUILD deleted file mode 100644 index fa5987e544..0000000000 --- a/third_party/jedis/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -licenses(["notice"]) - -java_library( - name = "jedis", - visibility = [ - "//visibility:public", - ], - exports = [ - "//external:jar/redis/clients/jedis", - ], - runtime_deps = [ - "@maven//:org_apache_commons_commons_pool2", - ], -) diff --git a/third_party/jedis/LICENSE b/third_party/jedis/LICENSE deleted file mode 100644 index 7b8b1cee63..0000000000 --- a/third_party/jedis/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2010 Jonathan Leibiusky - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file
diff --git a/third_party/remote-apis/BUILD b/third_party/remote-apis/BUILD
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/third_party/remote-apis/BUILD.bazel b/third_party/remote-apis/BUILD.bazel
new file mode 100644
index 0000000000..40aba1531b
--- /dev/null
+++ b/third_party/remote-apis/BUILD.bazel
@@ -0,0 +1,23 @@
+load("@grpc-java//:java_grpc_library.bzl", "java_grpc_library")
+
+package(default_visibility = ["//visibility:public"])
+
+java_grpc_library(
+    name = "build_bazel_remote_asset_v1_remote_asset_java_grpc",
+    srcs = [
+        "@remoteapis//build/bazel/remote/asset/v1:remote_asset_proto",
+    ],
+    deps = [
+        "@remoteapis//build/bazel/remote/asset/v1:remote_asset_java_proto",
+    ],
+)
+
+java_grpc_library(
+    name = "build_bazel_remote_execution_v2_remote_execution_java_grpc",
+    srcs = [
+        "@remoteapis//build/bazel/remote/execution/v2:remote_execution_proto",
+    ],
+    deps = [
+        "@remoteapis//build/bazel/remote/execution/v2:remote_execution_java_proto",
+    ],
+)
diff --git a/third_party/remote-apis/remote-apis.patch b/third_party/remote-apis/remote-apis.patch
deleted file mode 100644
index e86fec5f54..0000000000
--- a/third_party/remote-apis/remote-apis.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-diff --git a/repository_rules.bzl b/repository_rules.bzl
-index 2b36886..6ffc75b 100644
---- a/repository_rules.bzl
-+++ b/repository_rules.bzl
-@@ -6,8 +6,6 @@ This is adapted from
- https://github.com/googleapis/googleapis/blob/master/repository_rules.bzl
- """
- 
--load("//:remote_apis_deps.bzl", "remote_apis_go_deps")
--
- def _switched_rules_impl(ctx):
-     disabled_rule_script = """
- def {rule_name}(**kwargs):
-@@ -118,8 +116,6 @@ def switched_rules_by_language(
-         name = name,
-         rules = rules,
-     )
--    if go:
--        remote_apis_go_deps()
- 
- def _switch(enabled, enabled_value):
-     return enabled_value if enabled else ""
- 
-diff --git a/build/bazel/remote/execution/v2/remote_execution.proto b/build/bazel/remote/execution/v2/remote_execution.proto
-index dbeff9c..4398b2f 100644
---- a/build/bazel/remote/execution/v2/remote_execution.proto
-+++ b/build/bazel/remote/execution/v2/remote_execution.proto
-@@ -1688,6 +1688,10 @@ message DigestFunction {
-     // cryptographic hash function and its collision properties are not strongly guaranteed.
-     // See https://github.com/aappleby/smhasher/wiki/MurmurHash3 .
-     MURMUR3 = 7;
-+
-+    // The BLAKE3 hash function.
-+    // See https://github.com/BLAKE3-team/BLAKE3.
-+    BLAKE3 = 9;
-   }
- }
-
diff --git a/tools/buildfarm-indexer.py b/tools/buildfarm-indexer.py
index 824763dd21..b020cb91d5 100755
--- a/tools/buildfarm-indexer.py
+++ b/tools/buildfarm-indexer.py
@@ -1,5 +1,5 @@
 from redis.client import Pipeline
-from rediscluster import StrictRedisCluster
+from rediscluster import RedisCluster
 import sys
 
 def get_cas_page(r, cursor, count):
@@ -15,7 +15,7 @@ def get_cas_page(r, cursor, count):
     print ("usage: buildfarm-indexer.py <redis-host>")
     sys.exit(1)
 
-r = StrictRedisCluster(startup_nodes=[{"host": redis_host, "port": 6379}], skip_full_coverage_check=True)
+r = RedisCluster(startup_nodes=[{"host": redis_host, "port": 6379}], skip_full_coverage_check=True)
 
 nodes = r.connection_pool.nodes
 
@@ -30,14 +30,15 @@ def get_cas_page(r, cursor, count):
         slots.remove(slot)
         node_keys[slot] = str(node_key)
 
-workers = r.hkeys("Workers")
+# config f"{backplane.workersHashName}_storage"
+workers = r.hkeys("Workers_storage")
 
 worker_count = len(workers)
 
 print ("%d workers" % worker_count)
 
 p = r.pipeline()
-for node_key in node_keys.viewvalues():
+for node_key in node_keys.values():
     p.delete("{%s}:intersecting-workers" % node_key)
     p.sadd("{%s}:intersecting-workers" % node_key, *workers)
 p.execute()
@@ -101,8 +102,9 @@ def process(self, cas_names, conn):
         count = len(cas_names)
         p = self.pipeline(conn)
         for i in range(count):
-            name = cas_names[i]
-            node_key = node_keys[nodes.keyslot(str(name))]
+            name = cas_names[i].decode()
+            keyslot = nodes.keyslot(name)
+            node_key = node_keys[keyslot]
             set_key = "{%s}:intersecting-workers" % node_key
             p.sinterstore(name, set_key, name)
         p.execute()
@@ -116,8 +118,8 @@ def process(self, cas_names, conn):
 map_cas_page(r, 10000, indexer.process)
 
 p = r.pipeline()
-for node_key in node_keys.viewvalues():
+for node_key in node_keys.values():
     p.delete("{%s}:intersecting-workers" % node_key)
 p.execute()
 
-print("\n%d processed" % (indexer.processed))
\ No newline at end of file
+print("\n%d processed" % (indexer.processed))