diff --git a/.github/workflows/pytest_cpu_gha_runner.yaml b/.github/workflows/pytest_cpu_gha_runner.yaml
index 4c8d6a671..fc67d30c0 100644
--- a/.github/workflows/pytest_cpu_gha_runner.yaml
+++ b/.github/workflows/pytest_cpu_gha_runner.yaml
@@ -2,20 +2,20 @@ name: "[GHA][CPU] llama-cookbook Pytest tests on CPU GitHub hosted runner."
 on:
   pull_request:
     branches:
-      - 'main'
+      - "main"
 
   # triggers workflow manually for debugging purposes.
   workflow_dispatch:
     inputs:
       runner:
-        description: 'GHA Runner Scale Set label to run workflow on.'
+        description: "GHA Runner Scale Set label to run workflow on."
         required: true
         default: ubuntu-24.04
 
       debug:
-          description: 'Run debugging steps?'
-          required: false
-          default: "true"
+        description: "Run debugging steps?"
+        required: false
+        default: "true"
 
 env:
   PYTORCH_WHEEL_URL: https://download.pytorch.org/whl/test/cu118
@@ -28,23 +28,21 @@ jobs:
         shell: bash # default shell to run all steps for a given job.
     runs-on: ${{ github.event.inputs.runner != '' && github.event.inputs.runner || 'ubuntu-24.04' }}
     steps:
-
       - name: "[DEBUG] Get runner container OS information"
         id: os_info
         if: ${{ github.event.inputs.debug == 'true' }}
         run: |
-            cat /etc/os-release
+          cat /etc/os-release
 
       - name: "Checkout 'meta-llama/llama-cookbook' repository"
         id: checkout
         uses: actions/checkout@v4
 
-
       - name: "[DEBUG] Content of the repository after checkout"
         id: content_after_checkout
         if: ${{ github.event.inputs.debug == 'true' }}
         run: |
-            ls -la ${GITHUB_WORKSPACE}
+          ls -la ${GITHUB_WORKSPACE}
 
       - name: "Installing Python dependencies"
         id: python_dependencies
@@ -52,20 +50,28 @@ jobs:
           pip3 install --upgrade pip
           pip3 install setuptools
 
-
       - name: "Installing 'llama-cookbook' project"
         id: install_llama_cookbook_package
         run: |
           echo "Installing 'llama-cookbook' project (re: https://github.com/meta-llama/llama-cookbook/tree/main/src?tab=readme-ov-file#install-with-optional-dependencies)"
           pip install --extra-index-url ${PYTORCH_WHEEL_URL} -e '.[tests]'
 
-
       - name: "Running PyTest tests on GHA CPU Runner"
         id: pytest
+        continue-on-error: true
         run: |
          echo "Running PyTest tests at 'GITHUB_WORKSPACE' path: ${GITHUB_WORKSPACE}"
          cd $GITHUB_WORKSPACE && python3 -m pytest --junitxml="$GITHUB_WORKSPACE/result.xml"
+          # Check for the presence of the JUnit-generated report file:
+          if [ -f "$GITHUB_WORKSPACE/result.xml" ]; then
+            cat "$GITHUB_WORKSPACE/result.xml"
+            echo "report_available=true" >> "$GITHUB_OUTPUT"
+          else
+            echo "$GITHUB_WORKSPACE/result.xml file is not present."
+            echo "report_available=false" >> "$GITHUB_OUTPUT"
+          fi
+
 
       - name: Publish Test Summary
         id: test_summary
         uses: test-summary/action@v2
@@ -73,4 +79,4 @@ jobs:
           paths: |
             **/*.xml
             !**/AndroidManifest.xml
-        if: always()
+        if: steps.pytest.outputs.report_available == 'true'
\ No newline at end of file
diff --git a/src/tests/datasets/test_samsum_datasets.py b/src/tests/datasets/test_samsum_datasets.py
index 0297cbb30..7a880a592 100644
--- a/src/tests/datasets/test_samsum_datasets.py
+++ b/src/tests/datasets/test_samsum_datasets.py
@@ -12,7 +12,7 @@ class Config:
     model_type: str = "llama"
 
 try:
-    load_dataset("Samsung/samsum")
+    load_dataset("knkarthick/samsum")
     SAMSUM_UNAVAILABLE = False
 except ValueError:
     SAMSUM_UNAVAILABLE = True
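Reviewer note on the step-output plumbing this patch adds: GitHub Actions collects `key=value` lines appended to the file named by the `GITHUB_OUTPUT` environment variable and exposes them to later steps as `steps.<step_id>.outputs.<key>`. That is what the corrected `echo "report_available=..." >> "$GITHUB_OUTPUT"` lines feed, and what the new `if: steps.pytest.outputs.report_available == 'true'` gate on the publish step reads. A minimal sketch of the same mechanism in Python; the helper name is hypothetical and not part of this PR:

```python
import os


def set_step_output(key: str, value: str) -> None:
    """Append a key=value line to the file GitHub Actions scans for step outputs.

    Hypothetical helper: any process in a step may write to $GITHUB_OUTPUT,
    and the workflow then sees it as steps.<step_id>.outputs.<key>.
    """
    output_file = os.environ.get("GITHUB_OUTPUT")
    if output_file is None:
        raise RuntimeError("GITHUB_OUTPUT is not set; not running under GitHub Actions?")
    with open(output_file, "a", encoding="utf-8") as fh:
        fh.write(f"{key}={value}\n")


# Equivalent to the workflow's: echo "report_available=true" >> "$GITHUB_OUTPUT"
# set_step_output("report_available", "true")
```

Note that `continue-on-error: true` on the pytest step is what lets the job reach the publish step at all when tests fail; the `report_available` flag then distinguishes "pytest produced a report" from "pytest never wrote result.xml".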
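On the test-file change: the availability probe now targets the `knkarthick/samsum` mirror instead of `Samsung/samsum`. Only the try/except probe is visible in this hunk; presumably the module-level `SAMSUM_UNAVAILABLE` flag feeds a skip marker so the suite degrades gracefully when the dataset cannot be fetched. A sketch of that pattern, with the marker usage assumed rather than taken from this diff:

```python
import pytest
from datasets import load_dataset

# Probe the dataset once at import time; load_dataset raising ValueError
# (e.g. the dataset repo is unavailable) flips the module-level flag.
try:
    load_dataset("knkarthick/samsum")
    SAMSUM_UNAVAILABLE = False
except ValueError:
    SAMSUM_UNAVAILABLE = True


# Assumed usage: skip dataset-dependent tests instead of failing the CPU runner.
@pytest.mark.skipif(SAMSUM_UNAVAILABLE, reason="samsum dataset is unavailable")
def test_samsum_download_smoke():
    dataset = load_dataset("knkarthick/samsum", split="train")
    assert len(dataset) > 0
```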