diff --git a/.github/actions/common/code-style/action.yml b/.github/actions/common/code-style/action.yml index 93e6b55a..a291f8cb 100644 --- a/.github/actions/common/code-style/action.yml +++ b/.github/actions/common/code-style/action.yml @@ -1,77 +1,77 @@ -name: "Code Style Check" -description: "Reusable action to check C/C++ code style with clang-format" -inputs: - target_dir: - description: "Directory to check for C/C++ files" - required: false - default: "." - name: - description: 'Name for the output artifact' - required: false - default: 'code-style-check-report' - fail-on-findings: - description: "Whether to fail the action if issues are found" - required: false - default: "true" - -runs: - using: "composite" - steps: - - name: Install dependencies - run: | - sudo apt-get update - sudo apt-get install --no-install-recommends -y clang-format curl ca-certificates build-essential - curl -fsSL https://deb.nodesource.com/setup_18.x | sudo -E bash - - sudo apt-get install -y nodejs - npm install -g diff2html-cli - shell: bash - - - name: Run code style check - id: code-style-check - env: - target_dir: ${{ inputs.target_dir }} - run: | - chmod +x ${target_dir}/.github/actions/common/code-style/entrypoint.sh - ${target_dir}/.github/actions/common/code-style/entrypoint.sh "${target_dir}" || echo "STYLE_ISSUES=true" >> $GITHUB_OUTPUT - shell: bash - - - name: Analyze code style results - if: always() - run: | - if [ "${{ steps.code-style-check.outputs.STYLE_ISSUES }}" == "true" ]; then - # Count number of files with style issues - if [ -f "_output/diff.html" ]; then - # Try to count files from diff output - file_count=$(diff -u --recursive "${{ inputs.target_dir }}" "_styled/${{ inputs.target_dir }}" 2>/dev/null | grep -c "^diff -u" || echo "1+") - - echo "### Code Style Check Results" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "- ❌ **Status**: Style issues found" >> $GITHUB_STEP_SUMMARY - echo "- 📁 **Files affected**: ${file_count}" >> 
$GITHUB_STEP_SUMMARY - echo "- 📄 **Detailed report**: Available in artifacts (diff.html)" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "âš ī¸ **Please review the code-style report artifact and apply clang-format to fix the issues.**" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "💡 **Tip**: Run \`clang-format -i\` on the affected files to automatically fix formatting." >> $GITHUB_STEP_SUMMARY - fi - else - echo "### Code Style Check Results" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "✅ **All code follows the style guidelines!**" >> $GITHUB_STEP_SUMMARY - fi - shell: bash - - - name: Upload clang-format report - if: always() - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - with: - name: ${{ inputs.name }} - path: _output/diff.html - if-no-files-found: ignore - - - name: Fail if code style issues found - if: inputs.fail-on-findings == 'true' && steps.code-style-check.outputs.STYLE_ISSUES == 'true' - shell: bash - run: | - echo "❌ Code style issues found. Failing the job." - exit 1 +name: "Code Style Check" +description: "Reusable action to check C/C++ code style with clang-format" +inputs: + target_dir: + description: "Directory to check for C/C++ files" + required: false + default: "." 
+ name: + description: 'Name for the output artifact' + required: false + default: 'code-style-check-report' + fail-on-findings: + description: "Whether to fail the action if issues are found" + required: false + default: "true" + +runs: + using: "composite" + steps: + - name: Install dependencies + run: | + sudo apt-get update + sudo apt-get install --no-install-recommends -y clang-format curl ca-certificates build-essential + curl -fsSL https://deb.nodesource.com/setup_18.x | sudo -E bash - + sudo apt-get install -y nodejs + npm install -g diff2html-cli + shell: bash + + - name: Run code style check + id: code-style-check + env: + target_dir: ${{ inputs.target_dir }} + run: | + chmod +x ${target_dir}/.github/actions/common/code-style/entrypoint.sh + ${target_dir}/.github/actions/common/code-style/entrypoint.sh "${target_dir}" || echo "STYLE_ISSUES=true" >> $GITHUB_OUTPUT + shell: bash + + - name: Analyze code style results + if: always() + run: | + if [ "${{ steps.code-style-check.outputs.STYLE_ISSUES }}" == "true" ]; then + # Count number of files with style issues + if [ -f "_output/diff.html" ]; then + # Try to count files from diff output + file_count=$(diff -u --recursive "${{ inputs.target_dir }}" "_styled/${{ inputs.target_dir }}" 2>/dev/null | grep -c "^diff -u" || echo "1+") + + echo "### Code Style Check Results" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "- ❌ **Status**: Style issues found" >> $GITHUB_STEP_SUMMARY + echo "- 📁 **Files affected**: ${file_count}" >> $GITHUB_STEP_SUMMARY + echo "- 📄 **Detailed report**: Available in artifacts (diff.html)" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "âš ī¸ **Please review the code-style report artifact and apply clang-format to fix the issues.**" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "💡 **Tip**: Run \`clang-format -i\` on the affected files to automatically fix formatting." 
>> $GITHUB_STEP_SUMMARY + fi + else + echo "### Code Style Check Results" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "✅ **All code follows the style guidelines!**" >> $GITHUB_STEP_SUMMARY + fi + shell: bash + + - name: Upload clang-format report + if: always() + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: ${{ inputs.name }} + path: _output/diff.html + if-no-files-found: ignore + + - name: Fail if code style issues found + if: inputs.fail-on-findings == 'true' && steps.code-style-check.outputs.STYLE_ISSUES == 'true' + shell: bash + run: | + echo "❌ Code style issues found. Failing the job." + exit 1 diff --git a/.github/actions/common/hadolint/action.yaml b/.github/actions/common/hadolint/action.yaml index c3d3fa41..2ada355d 100644 --- a/.github/actions/common/hadolint/action.yaml +++ b/.github/actions/common/hadolint/action.yaml @@ -1,130 +1,130 @@ -name: Run Hadolint -description: Lint Dockerfiles using Hadolint - -inputs: - dockerfile: - description: Path to Dockerfile - required: true - output-file: - description: Path to output file for lint results - required: true - name: - description: Name for the artifact - required: true - enable-reviewdog: - description: Enable ReviewDog PR comments - required: false - default: "false" - github_token: - description: GitHub token for ReviewDog - required: false - fail-on-findings: - description: "Whether to fail the action if issues are found" - required: false - default: "true" - -runs: - using: "composite" - steps: - - name: Install Hadolint - run: | - curl -sSL -o hadolint https://github.com/hadolint/hadolint/releases/download/v2.12.0/hadolint-Linux-x86_64 - chmod +x hadolint - sudo mv hadolint /usr/local/bin/ - shell: bash - - - name: Run Hadolint - id: run-hadolint - env: - dockerfile: ${{ inputs.dockerfile }} - output_file: ${{ inputs.output-file }} - run: | - hadolint ${dockerfile} \ - --format tty \ - 2>&1 | tee ${output_file} || true - if [ 
! -f "${output_file}" ]; then - echo "No Dockerfile found or hadolint produced no output" > ${output_file} - fi - shell: bash - - - name: Analyze Hadolint results - if: always() - env: - output_file: ${{ inputs.output-file }} - name: ${{ inputs.name }} - run: | - if [ -f "${output_file}" ]; then - # Count issues by severity (hadolint format: DL#### or SC#### followed by colored severity) - # Pattern matches: DL3008 or SC1091 (hadolint and shellcheck codes) - error_count=$(grep -E "(DL|SC)[0-9]+" "${output_file}" 2>/dev/null | grep -i "error" | wc -l | tr -d '[:space:]' || echo "0") - warning_count=$(grep -E "(DL|SC)[0-9]+" "${output_file}" 2>/dev/null | grep -i "warning" | wc -l | tr -d '[:space:]' || echo "0") - info_count=$(grep -E "(DL|SC)[0-9]+" "${output_file}" 2>/dev/null | grep -i "info" | wc -l | tr -d '[:space:]' || echo "0") - style_count=$(grep -E "(DL|SC)[0-9]+" "${output_file}" 2>/dev/null | grep -i "style" | wc -l | tr -d '[:space:]' || echo "0") - # Ensure counts are valid integers, default to 0 if empty - error_count=${error_count:-0} - warning_count=${warning_count:-0} - info_count=${info_count:-0} - style_count=${style_count:-0} - # Additional safety check - ensure numeric (use case to validate) - case "$error_count" in ''|*[!0-9]*) error_count=0 ;; esac || true - case "$warning_count" in ''|*[!0-9]*) warning_count=0 ;; esac || true - case "$info_count" in ''|*[!0-9]*) info_count=0 ;; esac || true - case "$style_count" in ''|*[!0-9]*) style_count=0 ;; esac || true - total=$((error_count + warning_count + info_count + style_count)) || total=0 - - echo "### Hadolint Results for ${name}" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "- **Total Issues**: $total" >> $GITHUB_STEP_SUMMARY - - if [ "$error_count" -gt 0 ]; then - echo "- ❌ **Errors**: $error_count" >> $GITHUB_STEP_SUMMARY - fi - if [ "$warning_count" -gt 0 ]; then - echo "- âš ī¸ **Warnings**: $warning_count" >> $GITHUB_STEP_SUMMARY - fi - if [ "$info_count" -gt 0 ]; 
then - echo "- â„šī¸ **Info**: $info_count" >> $GITHUB_STEP_SUMMARY - fi - if [ "$style_count" -gt 0 ]; then - echo "- 🎨 **Style**: $style_count" >> $GITHUB_STEP_SUMMARY - fi - - if [ "$total" -gt 0 ]; then - echo "" >> $GITHUB_STEP_SUMMARY - echo "âš ī¸ **Please review the Hadolint report artifact and consider fixing the issues.**" >> $GITHUB_STEP_SUMMARY - else - echo "" >> $GITHUB_STEP_SUMMARY - echo "✅ **No issues found!**" >> $GITHUB_STEP_SUMMARY - fi - fi - shell: bash - - - name: Upload Hadolint report as artifact - if: always() - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - with: - name: hadolint-report-${{ inputs.name }} - path: ${{ inputs.output-file }} - - - name: Run ReviewDog (Hadolint) - if: ${{ inputs.enable-reviewdog == 'true' }} - uses: reviewdog/action-hadolint@fc7ee4a9f71e521bc43e370819247b70e5327540 # 1.50.2 - with: - github_token: ${{ inputs.github_token }} - reporter: github-pr-review - level: warning - hadolint_flags: ${{ inputs.dockerfile }} - - - name: Fail if Hadolint found issues - if: inputs.fail-on-findings == 'true' - shell: bash - env: - output_file: ${{ inputs.output-file }} - run: | - if [ -f "${output_file}" ]; then - issue_count=$(grep -E "(DL|SC)[0-9]+" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") - if [ "$issue_count" -gt 0 ]; then - echo "❌ Hadolint found $issue_count issue(s). Failing the job." 
- exit 1 - fi - fi +name: Run Hadolint +description: Lint Dockerfiles using Hadolint + +inputs: + dockerfile: + description: Path to Dockerfile + required: true + output-file: + description: Path to output file for lint results + required: true + name: + description: Name for the artifact + required: true + enable-reviewdog: + description: Enable ReviewDog PR comments + required: false + default: "false" + github_token: + description: GitHub token for ReviewDog + required: false + fail-on-findings: + description: "Whether to fail the action if issues are found" + required: false + default: "true" + +runs: + using: "composite" + steps: + - name: Install Hadolint + run: | + curl -sSL -o hadolint https://github.com/hadolint/hadolint/releases/download/v2.12.0/hadolint-Linux-x86_64 + chmod +x hadolint + sudo mv hadolint /usr/local/bin/ + shell: bash + + - name: Run Hadolint + id: run-hadolint + env: + dockerfile: ${{ inputs.dockerfile }} + output_file: ${{ inputs.output-file }} + run: | + hadolint ${dockerfile} \ + --format tty \ + 2>&1 | tee ${output_file} || true + if [ ! 
-f "${output_file}" ]; then + echo "No Dockerfile found or hadolint produced no output" > ${output_file} + fi + shell: bash + + - name: Analyze Hadolint results + if: always() + env: + output_file: ${{ inputs.output-file }} + name: ${{ inputs.name }} + run: | + if [ -f "${output_file}" ]; then + # Count issues by severity (hadolint format: DL#### or SC#### followed by colored severity) + # Pattern matches: DL3008 or SC1091 (hadolint and shellcheck codes) + error_count=$(grep -E "(DL|SC)[0-9]+" "${output_file}" 2>/dev/null | grep -i "error" | wc -l | tr -d '[:space:]' || echo "0") + warning_count=$(grep -E "(DL|SC)[0-9]+" "${output_file}" 2>/dev/null | grep -i "warning" | wc -l | tr -d '[:space:]' || echo "0") + info_count=$(grep -E "(DL|SC)[0-9]+" "${output_file}" 2>/dev/null | grep -i "info" | wc -l | tr -d '[:space:]' || echo "0") + style_count=$(grep -E "(DL|SC)[0-9]+" "${output_file}" 2>/dev/null | grep -i "style" | wc -l | tr -d '[:space:]' || echo "0") + # Ensure counts are valid integers, default to 0 if empty + error_count=${error_count:-0} + warning_count=${warning_count:-0} + info_count=${info_count:-0} + style_count=${style_count:-0} + # Additional safety check - ensure numeric (use case to validate) + case "$error_count" in ''|*[!0-9]*) error_count=0 ;; esac || true + case "$warning_count" in ''|*[!0-9]*) warning_count=0 ;; esac || true + case "$info_count" in ''|*[!0-9]*) info_count=0 ;; esac || true + case "$style_count" in ''|*[!0-9]*) style_count=0 ;; esac || true + total=$((error_count + warning_count + info_count + style_count)) || total=0 + + echo "### Hadolint Results for ${name}" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "- **Total Issues**: $total" >> $GITHUB_STEP_SUMMARY + + if [ "$error_count" -gt 0 ]; then + echo "- ❌ **Errors**: $error_count" >> $GITHUB_STEP_SUMMARY + fi + if [ "$warning_count" -gt 0 ]; then + echo "- âš ī¸ **Warnings**: $warning_count" >> $GITHUB_STEP_SUMMARY + fi + if [ "$info_count" -gt 0 ]; then 
+ echo "- â„šī¸ **Info**: $info_count" >> $GITHUB_STEP_SUMMARY + fi + if [ "$style_count" -gt 0 ]; then + echo "- 🎨 **Style**: $style_count" >> $GITHUB_STEP_SUMMARY + fi + + if [ "$total" -gt 0 ]; then + echo "" >> $GITHUB_STEP_SUMMARY + echo "âš ī¸ **Please review the Hadolint report artifact and consider fixing the issues.**" >> $GITHUB_STEP_SUMMARY + else + echo "" >> $GITHUB_STEP_SUMMARY + echo "✅ **No issues found!**" >> $GITHUB_STEP_SUMMARY + fi + fi + shell: bash + + - name: Upload Hadolint report as artifact + if: always() + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: hadolint-report-${{ inputs.name }} + path: ${{ inputs.output-file }} + + - name: Run ReviewDog (Hadolint) + if: ${{ inputs.enable-reviewdog == 'true' }} + uses: reviewdog/action-hadolint@fc7ee4a9f71e521bc43e370819247b70e5327540 # 1.50.2 + with: + github_token: ${{ inputs.github_token }} + reporter: github-pr-review + level: warning + hadolint_flags: ${{ inputs.dockerfile }} + + - name: Fail if Hadolint found issues + if: inputs.fail-on-findings == 'true' + shell: bash + env: + output_file: ${{ inputs.output-file }} + run: | + if [ -f "${output_file}" ]; then + issue_count=$(grep -E "(DL|SC)[0-9]+" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") + if [ "$issue_count" -gt 0 ]; then + echo "❌ Hadolint found $issue_count issue(s). Failing the job." 
+ exit 1 + fi + fi diff --git a/.github/actions/common/license-namespace-checker/action.yaml b/.github/actions/common/license-namespace-checker/action.yaml index 8ab137ff..3de71684 100644 --- a/.github/actions/common/license-namespace-checker/action.yaml +++ b/.github/actions/common/license-namespace-checker/action.yaml @@ -1,108 +1,108 @@ -name: 'License and Namespace Checker' -description: 'Checks license headers and namespace usage in headers' -inputs: - name: - description: 'Name for the output artifact' - required: false - default: 'license-namespace-check-report' - path: - description: 'Path to the repository root' - required: false - default: '.' - fail-on-findings: - description: "Whether to fail the action if issues are found" - required: false - default: "true" -runs: - using: 'composite' - steps: - - name: Get list of changed files - shell: bash - id: discover-changes - env: - REPO_PATH: ${{ inputs.path }} - run: | - cd "${REPO_PATH}" - if [ "$(git rev-parse --abbrev-ref HEAD)" != "main" ]; then - git fetch origin main:main - echo "Fetched main branch" - fi - changed_files=$(git diff --name-only main...$GITHUB_SHA -- '*.h' '*.hpp' '*.c' '*.cpp' '*.sh' '*.py' '*.txt' | grep -E '.*\.(h|hpp|c|cpp|sh|py|txt)$' || true) - echo "Performed git diff" - if [ -n "$changed_files" ]; then - # Add changed files list to the GITHUB_OUTPUT separated by spaces - echo "changed_files=$(echo $changed_files)" >> $GITHUB_OUTPUT - echo "Changed files:" - echo "$changed_files" - else - # No changed files, explicitly set the exit code to 0 - echo "No changed files detected." - exit 0 - fi - - - name: Check License header and namespace usage in headers - id: license-check - shell: bash - env: - CHANGED_FILES: ${{ steps.discover-changes.outputs.changed_files }} - REPO_PATH: ${{ inputs.path }} - output_file: license-check-report.txt - run: | - if [ -z "${CHANGED_FILES}" ]; then - echo "No new files to scan." 
| tee "${output_file}" - echo "ISSUES_FOUND=false" >> $GITHUB_OUTPUT - else - if "${GITHUB_ACTION_PATH}/run.sh" "${REPO_PATH}" $CHANGED_FILES 2>&1 | tee "${output_file}"; then - echo "ISSUES_FOUND=false" >> $GITHUB_OUTPUT - else - echo "ISSUES_FOUND=true" >> $GITHUB_OUTPUT - fi - fi - - - name: Upload License Check report - if: always() - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - with: - name: ${{ inputs.name }} - path: license-check-report.txt - if-no-files-found: warn - - - name: Analyze License Check results - if: always() - shell: bash - env: - output_file: license-check-report.txt - run: | - if [ "${{ steps.license-check.outputs.ISSUES_FOUND }}" == "true" ]; then - # Count files with issues - if [ -f "${output_file}" ]; then - error_count=$(grep -c "Error:" "${output_file}" 2>/dev/null || echo "0") - echo "### License & Namespace Check Results" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "- ❌ **Status**: Issues found" >> $GITHUB_STEP_SUMMARY - echo "- 🔍 **Total errors**: ${error_count}" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "📄 **See job logs for detailed error messages.**" >> $GITHUB_STEP_SUMMARY - fi - elif [ "${{ steps.discover-changes.outputs.changed_files }}" != "" ]; then - echo "### License & Namespace Check Results" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "✅ **All checked files have correct license headers and namespace usage!**" >> $GITHUB_STEP_SUMMARY - else - echo "### License & Namespace Check Results" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "â„šī¸ **No relevant files changed - check skipped**" >> $GITHUB_STEP_SUMMARY - fi - - - name: Fail if license/namespace issues found - if: inputs.fail-on-findings == 'true' && steps.license-check.outputs.ISSUES_FOUND == 'true' - shell: bash - run: | - echo "❌ License or namespace issues found. Failing the job." 
- exit 1 - - - name: Clean up - if: always() - shell: bash - run: | +name: 'License and Namespace Checker' +description: 'Checks license headers and namespace usage in headers' +inputs: + name: + description: 'Name for the output artifact' + required: false + default: 'license-namespace-check-report' + path: + description: 'Path to the repository root' + required: false + default: '.' + fail-on-findings: + description: "Whether to fail the action if issues are found" + required: false + default: "true" +runs: + using: 'composite' + steps: + - name: Get list of changed files + shell: bash + id: discover-changes + env: + REPO_PATH: ${{ inputs.path }} + run: | + cd "${REPO_PATH}" + if [ "$(git rev-parse --abbrev-ref HEAD)" != "main" ]; then + git fetch origin main:main + echo "Fetched main branch" + fi + changed_files=$(git diff --name-only main...$GITHUB_SHA -- '*.h' '*.hpp' '*.c' '*.cpp' '*.sh' '*.py' '*.txt' | grep -E '.*\.(h|hpp|c|cpp|sh|py|txt)$' || true) + echo "Performed git diff" + if [ -n "$changed_files" ]; then + # Add changed files list to the GITHUB_OUTPUT separated by spaces + echo "changed_files=$(echo $changed_files)" >> $GITHUB_OUTPUT + echo "Changed files:" + echo "$changed_files" + else + # No changed files, explicitly set the exit code to 0 + echo "No changed files detected." + exit 0 + fi + + - name: Check License header and namespace usage in headers + id: license-check + shell: bash + env: + CHANGED_FILES: ${{ steps.discover-changes.outputs.changed_files }} + REPO_PATH: ${{ inputs.path }} + output_file: license-check-report.txt + run: | + if [ -z "${CHANGED_FILES}" ]; then + echo "No new files to scan." 
| tee "${output_file}" + echo "ISSUES_FOUND=false" >> $GITHUB_OUTPUT + else + if "${GITHUB_ACTION_PATH}/run.sh" "${REPO_PATH}" $CHANGED_FILES 2>&1 | tee "${output_file}"; then + echo "ISSUES_FOUND=false" >> $GITHUB_OUTPUT + else + echo "ISSUES_FOUND=true" >> $GITHUB_OUTPUT + fi + fi + + - name: Upload License Check report + if: always() + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: ${{ inputs.name }} + path: license-check-report.txt + if-no-files-found: warn + + - name: Analyze License Check results + if: always() + shell: bash + env: + output_file: license-check-report.txt + run: | + if [ "${{ steps.license-check.outputs.ISSUES_FOUND }}" == "true" ]; then + # Count files with issues + if [ -f "${output_file}" ]; then + error_count=$(grep -c "Error:" "${output_file}" 2>/dev/null || echo "0") + echo "### License & Namespace Check Results" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "- ❌ **Status**: Issues found" >> $GITHUB_STEP_SUMMARY + echo "- 🔍 **Total errors**: ${error_count}" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "📄 **See job logs for detailed error messages.**" >> $GITHUB_STEP_SUMMARY + fi + elif [ "${{ steps.discover-changes.outputs.changed_files }}" != "" ]; then + echo "### License & Namespace Check Results" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "✅ **All checked files have correct license headers and namespace usage!**" >> $GITHUB_STEP_SUMMARY + else + echo "### License & Namespace Check Results" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "â„šī¸ **No relevant files changed - check skipped**" >> $GITHUB_STEP_SUMMARY + fi + + - name: Fail if license/namespace issues found + if: inputs.fail-on-findings == 'true' && steps.license-check.outputs.ISSUES_FOUND == 'true' + shell: bash + run: | + echo "❌ License or namespace issues found. Failing the job." 
+ exit 1 + + - name: Clean up + if: always() + shell: bash + run: | rm -f license-check-report.txt \ No newline at end of file diff --git a/.github/actions/common/pylint/action.yaml b/.github/actions/common/pylint/action.yaml index 361e1116..0518b0f0 100644 --- a/.github/actions/common/pylint/action.yaml +++ b/.github/actions/common/pylint/action.yaml @@ -1,154 +1,154 @@ -name: Run Pylint -description: Lint Python files using pylint - -inputs: - path: - description: Path to Python files or folder - required: true - output-file: - description: Path to store the pylint output - required: true - name: - description: Name of the artifact - required: true - enable-reviewdog: - description: Enable ReviewDog PR comments - required: false - default: "false" - github_token: - description: GitHub token for ReviewDog - required: false - fail-on-findings: - description: "Whether to fail the action if issues are found" - required: false - default: "true" - -runs: - using: "composite" - steps: - - name: Set up Python - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 #v5.6.0 - with: - python-version: '3.11' - - - name: Install Pylint - run: | - python -m pip install --upgrade pip - pip install pylint - shell: bash - - - name: Run pylint - id: run-pylint - env: - path: ${{ inputs.path }} - output_file: ${{ inputs.output-file }} - run: | - # Run pylint on all Python files at once for a single comprehensive score - echo "🔍 Searching for Python files in: ${path}" - python_files=$(find "${path}" -name "*.py" -not -path "*/venv/*" 2>/dev/null || true) - - if [ -n "${python_files}" ]; then - echo "📝 Found Python files, running pylint..." - find "${path}" -name "*.py" -not -path "*/venv/*" -print0 | xargs -0 pylint 2>&1 | tee "${output_file}" || true - else - echo "âš ī¸ No Python files found in ${path}" | tee "${output_file}" - fi - - if [ ! 
-f "${output_file}" ]; then - echo "No Python files found or pylint produced no output" > "${output_file}" - fi - shell: bash - - - name: Analyze Pylint results - if: always() - env: - output_file: ${{ inputs.output-file }} - name: ${{ inputs.name }} - run: | - if [ -f "${output_file}" ]; then - # Count issues by severity (pylint uses C/R/W/E/F prefixes) - ensure we get single clean numbers - convention_count=$(grep -E "^[^:]+:[0-9]+:[0-9]+: C[0-9]+:" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") - refactor_count=$(grep -E "^[^:]+:[0-9]+:[0-9]+: R[0-9]+:" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") - warning_count=$(grep -E "^[^:]+:[0-9]+:[0-9]+: W[0-9]+:" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") - error_count=$(grep -E "^[^:]+:[0-9]+:[0-9]+: E[0-9]+:" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") - fatal_count=$(grep -E "^[^:]+:[0-9]+:[0-9]+: F[0-9]+:" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") - # Ensure counts are valid integers, default to 0 if empty - convention_count=${convention_count:-0} - refactor_count=${refactor_count:-0} - warning_count=${warning_count:-0} - error_count=${error_count:-0} - fatal_count=${fatal_count:-0} - # Additional safety check - ensure numeric (use case to validate) - set returns true - case "$convention_count" in ''|*[!0-9]*) convention_count=0 ;; esac || true - case "$refactor_count" in ''|*[!0-9]*) refactor_count=0 ;; esac || true - case "$warning_count" in ''|*[!0-9]*) warning_count=0 ;; esac || true - case "$error_count" in ''|*[!0-9]*) error_count=0 ;; esac || true - case "$fatal_count" in ''|*[!0-9]*) fatal_count=0 ;; esac || true - total=$((convention_count + refactor_count + warning_count + error_count + fatal_count)) || total=0 - - # Try to extract the score - ensure this doesn't fail - score=$(grep "Your code has been rated at" "${output_file}" 2>/dev/null | tail -1 | grep -oE 
"[0-9]+\.[0-9]+/10" 2>/dev/null || echo "") - - echo "### Pylint Results for ${name}" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "- **Total Issues**: $total" >> $GITHUB_STEP_SUMMARY - - if [ "$fatal_count" -gt 0 ]; then - echo "- 🔴 **Fatal**: $fatal_count" >> $GITHUB_STEP_SUMMARY - fi - if [ "$error_count" -gt 0 ]; then - echo "- ❌ **Errors**: $error_count" >> $GITHUB_STEP_SUMMARY - fi - if [ "$warning_count" -gt 0 ]; then - echo "- âš ī¸ **Warnings**: $warning_count" >> $GITHUB_STEP_SUMMARY - fi - if [ "$refactor_count" -gt 0 ]; then - echo "- 🔧 **Refactor**: $refactor_count" >> $GITHUB_STEP_SUMMARY - fi - if [ "$convention_count" -gt 0 ]; then - echo "- 📋 **Convention**: $convention_count" >> $GITHUB_STEP_SUMMARY - fi - - if [ -n "$score" ]; then - echo "- 📊 **Score**: $score" >> $GITHUB_STEP_SUMMARY - fi - - if [ "$total" -gt 0 ]; then - echo "" >> $GITHUB_STEP_SUMMARY - echo "âš ī¸ **Please review the Pylint report artifact and consider fixing the issues.**" >> $GITHUB_STEP_SUMMARY - else - echo "" >> $GITHUB_STEP_SUMMARY - echo "✅ **No issues found!**" >> $GITHUB_STEP_SUMMARY - fi - fi - shell: bash - - - name: Upload Pylint report - if: always() - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - with: - name: pylint-report-${{ inputs.name }} - path: ${{ inputs.output-file }} - - - name: Run ReviewDog (Pylint) - if: ${{ inputs.enable-reviewdog == 'true' }} - uses: dciborow/action-pylint@cf6c9bcd79e4aa70e2fbeaf6332d741eb1e0bff8 #0.1.1 - with: - github_token: ${{ inputs.github_token }} - reporter: github-pr-review - level: warning - workdir: ${{ inputs.path }} - - - name: Fail if Pylint found issues - if: inputs.fail-on-findings == 'true' - shell: bash - env: - output_file: ${{ inputs.output-file }} - run: | - if [ -f "${output_file}" ]; then - issue_count=$(grep -E "^[^:]+:[0-9]+:[0-9]+: [CRWEF][0-9]+:" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") - if [ "$issue_count" -gt 0 ]; 
then - echo "❌ Pylint found $issue_count issue(s). Failing the job." - exit 1 - fi - fi +name: Run Pylint +description: Lint Python files using pylint + +inputs: + path: + description: Path to Python files or folder + required: true + output-file: + description: Path to store the pylint output + required: true + name: + description: Name of the artifact + required: true + enable-reviewdog: + description: Enable ReviewDog PR comments + required: false + default: "false" + github_token: + description: GitHub token for ReviewDog + required: false + fail-on-findings: + description: "Whether to fail the action if issues are found" + required: false + default: "true" + +runs: + using: "composite" + steps: + - name: Set up Python + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 #v5.6.0 + with: + python-version: '3.11' + + - name: Install Pylint + run: | + python -m pip install --upgrade pip + pip install pylint + shell: bash + + - name: Run pylint + id: run-pylint + env: + path: ${{ inputs.path }} + output_file: ${{ inputs.output-file }} + run: | + # Run pylint on all Python files at once for a single comprehensive score + echo "🔍 Searching for Python files in: ${path}" + python_files=$(find "${path}" -name "*.py" -not -path "*/venv/*" 2>/dev/null || true) + + if [ -n "${python_files}" ]; then + echo "📝 Found Python files, running pylint..." + find "${path}" -name "*.py" -not -path "*/venv/*" -print0 | xargs -0 pylint 2>&1 | tee "${output_file}" || true + else + echo "âš ī¸ No Python files found in ${path}" | tee "${output_file}" + fi + + if [ ! 
-f "${output_file}" ]; then + echo "No Python files found or pylint produced no output" > "${output_file}" + fi + shell: bash + + - name: Analyze Pylint results + if: always() + env: + output_file: ${{ inputs.output-file }} + name: ${{ inputs.name }} + run: | + if [ -f "${output_file}" ]; then + # Count issues by severity (pylint uses C/R/W/E/F prefixes) - ensure we get single clean numbers + convention_count=$(grep -E "^[^:]+:[0-9]+:[0-9]+: C[0-9]+:" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") + refactor_count=$(grep -E "^[^:]+:[0-9]+:[0-9]+: R[0-9]+:" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") + warning_count=$(grep -E "^[^:]+:[0-9]+:[0-9]+: W[0-9]+:" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") + error_count=$(grep -E "^[^:]+:[0-9]+:[0-9]+: E[0-9]+:" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") + fatal_count=$(grep -E "^[^:]+:[0-9]+:[0-9]+: F[0-9]+:" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") + # Ensure counts are valid integers, default to 0 if empty + convention_count=${convention_count:-0} + refactor_count=${refactor_count:-0} + warning_count=${warning_count:-0} + error_count=${error_count:-0} + fatal_count=${fatal_count:-0} + # Additional safety check - ensure numeric (use case to validate) - set returns true + case "$convention_count" in ''|*[!0-9]*) convention_count=0 ;; esac || true + case "$refactor_count" in ''|*[!0-9]*) refactor_count=0 ;; esac || true + case "$warning_count" in ''|*[!0-9]*) warning_count=0 ;; esac || true + case "$error_count" in ''|*[!0-9]*) error_count=0 ;; esac || true + case "$fatal_count" in ''|*[!0-9]*) fatal_count=0 ;; esac || true + total=$((convention_count + refactor_count + warning_count + error_count + fatal_count)) || total=0 + + # Try to extract the score - ensure this doesn't fail + score=$(grep "Your code has been rated at" "${output_file}" 2>/dev/null | tail -1 | grep -oE 
"[0-9]+\.[0-9]+/10" 2>/dev/null || echo "") + + echo "### Pylint Results for ${name}" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "- **Total Issues**: $total" >> $GITHUB_STEP_SUMMARY + + if [ "$fatal_count" -gt 0 ]; then + echo "- 🔴 **Fatal**: $fatal_count" >> $GITHUB_STEP_SUMMARY + fi + if [ "$error_count" -gt 0 ]; then + echo "- ❌ **Errors**: $error_count" >> $GITHUB_STEP_SUMMARY + fi + if [ "$warning_count" -gt 0 ]; then + echo "- âš ī¸ **Warnings**: $warning_count" >> $GITHUB_STEP_SUMMARY + fi + if [ "$refactor_count" -gt 0 ]; then + echo "- 🔧 **Refactor**: $refactor_count" >> $GITHUB_STEP_SUMMARY + fi + if [ "$convention_count" -gt 0 ]; then + echo "- 📋 **Convention**: $convention_count" >> $GITHUB_STEP_SUMMARY + fi + + if [ -n "$score" ]; then + echo "- 📊 **Score**: $score" >> $GITHUB_STEP_SUMMARY + fi + + if [ "$total" -gt 0 ]; then + echo "" >> $GITHUB_STEP_SUMMARY + echo "âš ī¸ **Please review the Pylint report artifact and consider fixing the issues.**" >> $GITHUB_STEP_SUMMARY + else + echo "" >> $GITHUB_STEP_SUMMARY + echo "✅ **No issues found!**" >> $GITHUB_STEP_SUMMARY + fi + fi + shell: bash + + - name: Upload Pylint report + if: always() + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: pylint-report-${{ inputs.name }} + path: ${{ inputs.output-file }} + + - name: Run ReviewDog (Pylint) + if: ${{ inputs.enable-reviewdog == 'true' }} + uses: dciborow/action-pylint@cf6c9bcd79e4aa70e2fbeaf6332d741eb1e0bff8 #0.1.1 + with: + github_token: ${{ inputs.github_token }} + reporter: github-pr-review + level: warning + workdir: ${{ inputs.path }} + + - name: Fail if Pylint found issues + if: inputs.fail-on-findings == 'true' + shell: bash + env: + output_file: ${{ inputs.output-file }} + run: | + if [ -f "${output_file}" ]; then + issue_count=$(grep -E "^[^:]+:[0-9]+:[0-9]+: [CRWEF][0-9]+:" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") + if [ "$issue_count" -gt 0 ]; 
then + echo "❌ Pylint found $issue_count issue(s). Failing the job." + exit 1 + fi + fi diff --git a/.github/actions/common/shellcheck/action.yaml b/.github/actions/common/shellcheck/action.yaml index b672bcbf..10ed923b 100644 --- a/.github/actions/common/shellcheck/action.yaml +++ b/.github/actions/common/shellcheck/action.yaml @@ -1,141 +1,141 @@ -name: Run ShellCheck -description: Lint shell scripts using shellcheck - -inputs: - path: - description: Path to shell scripts or folder - required: true - output-file: - description: Path to store the shellcheck output - required: true - name: - description: Name of the artifact - required: true - enable-reviewdog: - description: Enable ReviewDog PR comments - required: false - default: "false" - github_token: - description: GitHub token for ReviewDog - required: false - fail-on-findings: - description: "Whether to fail the action if issues are found" - required: false - default: "true" - -runs: - using: "composite" - steps: - - name: Install ShellCheck - run: sudo apt-get install -y shellcheck - shell: bash - - - name: Run ShellCheck - id: run-shellcheck - env: - path: ${{ inputs.path }} - output_file: ${{ inputs.output-file }} - run: | - find ${path} -name "*.sh" -exec shellcheck {} \; 2>&1 | tee ${output_file} || true - if [ ! 
-f "${output_file}" ]; then - echo "No shell scripts found or shellcheck produced no output" > ${output_file} - fi - shell: bash - - - name: Analyze ShellCheck results - if: always() - id: analyze-results - env: - output_file: ${{ inputs.output-file }} - name: ${{ inputs.name }} - run: | - if [ -f "${output_file}" ]; then - # Count issues by severity - shellcheck format is "SC#### (level):" - error_count=$(grep -E "SC[0-9]+ \(error\):" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") - warning_count=$(grep -E "SC[0-9]+ \(warning\):" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") - info_count=$(grep -E "SC[0-9]+ \(info\):" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") - note_count=$(grep -E "SC[0-9]+ \(note\):" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") - - # Ensure counts are valid integers, default to 0 if empty - error_count=${error_count:-0} - warning_count=${warning_count:-0} - info_count=${info_count:-0} - note_count=${note_count:-0} - - # Additional safety check - ensure numeric (use case to validate) - case "$error_count" in ''|*[!0-9]*) error_count=0 ;; esac || true - case "$warning_count" in ''|*[!0-9]*) warning_count=0 ;; esac || true - case "$info_count" in ''|*[!0-9]*) info_count=0 ;; esac || true - case "$note_count" in ''|*[!0-9]*) note_count=0 ;; esac || true - - total=$((error_count + warning_count + info_count + note_count)) || total=0 - - # Export error_count for use in the final step - echo "error_count=$error_count" >> $GITHUB_OUTPUT - echo "total_count=$total" >> $GITHUB_OUTPUT - - echo "### ShellCheck Results for ${name}" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "- **Total Issues**: $total" >> $GITHUB_STEP_SUMMARY - - if [ "$error_count" -gt 0 ]; then - echo "- 🔴 **Errors**: $error_count" >> $GITHUB_STEP_SUMMARY - fi - if [ "$warning_count" -gt 0 ]; then - echo "- âš ī¸ **Warnings**: $warning_count" >> $GITHUB_STEP_SUMMARY - 
fi - if [ "$info_count" -gt 0 ]; then - echo "- â„šī¸ **Info**: $info_count" >> $GITHUB_STEP_SUMMARY - fi - if [ "$note_count" -gt 0 ]; then - echo "- 📝 **Notes**: $note_count" >> $GITHUB_STEP_SUMMARY - fi - - if [ "$error_count" -gt 0 ]; then - echo "" >> $GITHUB_STEP_SUMMARY - echo "âš ī¸ **Please review the ShellCheck report artifact and fix the errors.**" >> $GITHUB_STEP_SUMMARY - else - echo "" >> $GITHUB_STEP_SUMMARY - echo "✅ **No errors found!**" >> $GITHUB_STEP_SUMMARY - if [ "$total" -gt 0 ]; then - echo "📋 **Note**: There are warnings/info/notes that should be reviewed but won't fail the build." >> $GITHUB_STEP_SUMMARY - fi - fi - else - echo "error_count=0" >> $GITHUB_OUTPUT - echo "total_count=0" >> $GITHUB_OUTPUT - fi - shell: bash - - - name: Upload ShellCheck report - if: always() - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - with: - name: shellcheck-report-${{ inputs.name }} - path: ${{ inputs.output-file }} - - - name: Run ReviewDog (ShellCheck) - if: ${{ inputs.enable-reviewdog == 'true' }} - uses: reviewdog/action-shellcheck@4c07458293ac342d477251099501a718ae5ef86e # 1.32.0 - with: - path: . - github_token: ${{ inputs.github_token }} - reporter: github-pr-review - level: warning - - - name: Fail if ShellCheck found errors - if: inputs.fail-on-findings == 'true' - shell: bash - run: | - error_count="${{ steps.analyze-results.outputs.error_count }}" - total_count="${{ steps.analyze-results.outputs.total_count }}" - - if [ "$error_count" -gt 0 ]; then - echo "❌ ShellCheck found $error_count error(s). Failing the job." - exit 1 - elif [ "$total_count" -gt 0 ]; then - echo "✅ ShellCheck found $total_count issue(s) but no errors. Job will continue." - echo "📋 Issues found: warnings, info, or notes that should be reviewed." - else - echo "✅ No ShellCheck issues found." 
- fi +name: Run ShellCheck +description: Lint shell scripts using shellcheck + +inputs: + path: + description: Path to shell scripts or folder + required: true + output-file: + description: Path to store the shellcheck output + required: true + name: + description: Name of the artifact + required: true + enable-reviewdog: + description: Enable ReviewDog PR comments + required: false + default: "false" + github_token: + description: GitHub token for ReviewDog + required: false + fail-on-findings: + description: "Whether to fail the action if issues are found" + required: false + default: "true" + +runs: + using: "composite" + steps: + - name: Install ShellCheck + run: sudo apt-get update && sudo apt-get install -y shellcheck + shell: bash + + - name: Run ShellCheck + id: run-shellcheck + env: + path: ${{ inputs.path }} + output_file: ${{ inputs.output-file }} + run: | + find "${path}" -name "*.sh" -exec shellcheck {} \; 2>&1 | tee "${output_file}" || true + if [ ! -f "${output_file}" ]; then + echo "No shell scripts found or shellcheck produced no output" > "${output_file}" + fi + shell: bash + + - name: Analyze ShellCheck results + if: always() + id: analyze-results + env: + output_file: ${{ inputs.output-file }} + name: ${{ inputs.name }} + run: | + if [ -f "${output_file}" ]; then + # Count issues by severity - shellcheck format is "SC#### (level):" + error_count=$(grep -E "SC[0-9]+ \(error\):" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") + warning_count=$(grep -E "SC[0-9]+ \(warning\):" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") + info_count=$(grep -E "SC[0-9]+ \(info\):" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") + note_count=$(grep -E "SC[0-9]+ \(note\):" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") + + # Ensure counts are valid integers, default to 0 if empty + error_count=${error_count:-0} + warning_count=${warning_count:-0} + info_count=${info_count:-0} + note_count=${note_count:-0} 
+ + # Additional safety check - ensure numeric (use case to validate) + case "$error_count" in ''|*[!0-9]*) error_count=0 ;; esac || true + case "$warning_count" in ''|*[!0-9]*) warning_count=0 ;; esac || true + case "$info_count" in ''|*[!0-9]*) info_count=0 ;; esac || true + case "$note_count" in ''|*[!0-9]*) note_count=0 ;; esac || true + + total=$((error_count + warning_count + info_count + note_count)) || total=0 + + # Export error_count for use in the final step + echo "error_count=$error_count" >> $GITHUB_OUTPUT + echo "total_count=$total" >> $GITHUB_OUTPUT + + echo "### ShellCheck Results for ${name}" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "- **Total Issues**: $total" >> $GITHUB_STEP_SUMMARY + + if [ "$error_count" -gt 0 ]; then + echo "- 🔴 **Errors**: $error_count" >> $GITHUB_STEP_SUMMARY + fi + if [ "$warning_count" -gt 0 ]; then + echo "- âš ī¸ **Warnings**: $warning_count" >> $GITHUB_STEP_SUMMARY + fi + if [ "$info_count" -gt 0 ]; then + echo "- â„šī¸ **Info**: $info_count" >> $GITHUB_STEP_SUMMARY + fi + if [ "$note_count" -gt 0 ]; then + echo "- 📝 **Notes**: $note_count" >> $GITHUB_STEP_SUMMARY + fi + + if [ "$error_count" -gt 0 ]; then + echo "" >> $GITHUB_STEP_SUMMARY + echo "âš ī¸ **Please review the ShellCheck report artifact and fix the errors.**" >> $GITHUB_STEP_SUMMARY + else + echo "" >> $GITHUB_STEP_SUMMARY + echo "✅ **No errors found!**" >> $GITHUB_STEP_SUMMARY + if [ "$total" -gt 0 ]; then + echo "📋 **Note**: There are warnings/info/notes that should be reviewed but won't fail the build." 
>> $GITHUB_STEP_SUMMARY + fi + fi + else + echo "error_count=0" >> $GITHUB_OUTPUT + echo "total_count=0" >> $GITHUB_OUTPUT + fi + shell: bash + + - name: Upload ShellCheck report + if: always() + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: shellcheck-report-${{ inputs.name }} + path: ${{ inputs.output-file }} + + - name: Run ReviewDog (ShellCheck) + if: ${{ inputs.enable-reviewdog == 'true' }} + uses: reviewdog/action-shellcheck@4c07458293ac342d477251099501a718ae5ef86e # 1.32.0 + with: + path: . + github_token: ${{ inputs.github_token }} + reporter: github-pr-review + level: warning + + - name: Fail if ShellCheck found errors + if: inputs.fail-on-findings == 'true' + shell: bash + run: | + error_count="${{ steps.analyze-results.outputs.error_count }}" + total_count="${{ steps.analyze-results.outputs.total_count }}" + + if [ "$error_count" -gt 0 ]; then + echo "❌ ShellCheck found $error_count error(s). Failing the job." + exit 1 + elif [ "$total_count" -gt 0 ]; then + echo "✅ ShellCheck found $total_count issue(s) but no errors. Job will continue." + echo "📋 Issues found: warnings, info, or notes that should be reviewed." + else + echo "✅ No ShellCheck issues found." 
+ fi diff --git a/.github/actions/common/yamllint/action.yaml b/.github/actions/common/yamllint/action.yaml index 667319fa..e85d1305 100644 --- a/.github/actions/common/yamllint/action.yaml +++ b/.github/actions/common/yamllint/action.yaml @@ -1,113 +1,113 @@ -name: Run Yamllint -description: Lint YAML files using yamllint - -inputs: - path: - description: Path to YAML files or folder - required: true - output-file: - description: Path to store the yamllint output - required: true - name: - description: Name of the artifact - required: true - enable-reviewdog: - description: Enable ReviewDog PR comments - required: false - default: "false" - github_token: - description: GitHub token for ReviewDog - required: false - fail-on-findings: - description: "Whether to fail the action if issues are found" - required: false - default: "true" - -runs: - using: "composite" - steps: - - name: Install Yamllint - run: sudo apt-get install -y yamllint - shell: bash - - - name: Run Yamllint - id: run-yamllint - env: - path: ${{ inputs.path }} - output_file: ${{ inputs.output-file }} - run: | - yamllint ${path} 2>&1 | tee ${output_file} || true - if [ ! 
-f "${output_file}" ]; then - echo "No YAML files found or yamllint produced no output" > ${output_file} - fi - echo "✅ Report created at: $(pwd)/${output_file}" - ls -lh ${output_file} - shell: bash - - - name: Analyze Yamllint results - if: always() - env: - output_file: ${{ inputs.output-file }} - name: ${{ inputs.name }} - run: | - if [ -f "${output_file}" ]; then - # Count issues by severity (yamllint uses ::error and ::warning in GitHub Actions format) - error_count=$(grep "::error" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") - warning_count=$(grep "::warning" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") - # Ensure counts are valid integers, default to 0 if empty - error_count=${error_count:-0} - warning_count=${warning_count:-0} - # Additional safety check - ensure numeric (use case to validate) - case "$error_count" in ''|*[!0-9]*) error_count=0 ;; esac || true - case "$warning_count" in ''|*[!0-9]*) warning_count=0 ;; esac || true - total=$((error_count + warning_count)) || total=0 - - echo "### Yamllint Results for ${name}" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "- **Total Issues**: $total" >> $GITHUB_STEP_SUMMARY - - if [ "$error_count" -gt 0 ]; then - echo "- ❌ **Errors**: $error_count" >> $GITHUB_STEP_SUMMARY - fi - if [ "$warning_count" -gt 0 ]; then - echo "- âš ī¸ **Warnings**: $warning_count" >> $GITHUB_STEP_SUMMARY - fi - - if [ "$total" -gt 0 ]; then - echo "" >> $GITHUB_STEP_SUMMARY - echo "âš ī¸ **Please review the Yamllint report artifact and consider fixing the issues.**" >> $GITHUB_STEP_SUMMARY - else - echo "" >> $GITHUB_STEP_SUMMARY - echo "✅ **No issues found!**" >> $GITHUB_STEP_SUMMARY - fi - fi - shell: bash - - - name: Upload Yamllint report - if: always() - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - with: - name: yamllint-report-${{ inputs.name }} - path: ${{ inputs.output-file }} - - - name: Run ReviewDog (Yamllint) - 
if: ${{ inputs.enable-reviewdog == 'true' }} - uses: reviewdog/action-yamllint@f01d8a48fd8d89f89895499fca2cff09f9e9e8c0 # 1.21.0 - with: - github_token: ${{ inputs.github_token }} - reporter: github-pr-review - level: warning - - - name: Fail if Yamllint found issues - if: inputs.fail-on-findings == 'true' - shell: bash - env: - output_file: ${{ inputs.output-file }} - run: | - if [ -f "${output_file}" ]; then - issue_count=$(grep -E "::error|::warning" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") - if [ "$issue_count" -gt 0 ]; then - echo "❌ Yamllint found $issue_count issue(s). Failing the job." - exit 1 - fi - fi +name: Run Yamllint +description: Lint YAML files using yamllint + +inputs: + path: + description: Path to YAML files or folder + required: true + output-file: + description: Path to store the yamllint output + required: true + name: + description: Name of the artifact + required: true + enable-reviewdog: + description: Enable ReviewDog PR comments + required: false + default: "false" + github_token: + description: GitHub token for ReviewDog + required: false + fail-on-findings: + description: "Whether to fail the action if issues are found" + required: false + default: "true" + +runs: + using: "composite" + steps: + - name: Install Yamllint + run: sudo apt-get install -y yamllint + shell: bash + + - name: Run Yamllint + id: run-yamllint + env: + path: ${{ inputs.path }} + output_file: ${{ inputs.output-file }} + run: | + yamllint ${path} 2>&1 | tee ${output_file} || true + if [ ! 
-f "${output_file}" ]; then + echo "No YAML files found or yamllint produced no output" > ${output_file} + fi + echo "✅ Report created at: $(pwd)/${output_file}" + ls -lh ${output_file} + shell: bash + + - name: Analyze Yamllint results + if: always() + env: + output_file: ${{ inputs.output-file }} + name: ${{ inputs.name }} + run: | + if [ -f "${output_file}" ]; then + # Count issues by severity (yamllint uses ::error and ::warning in GitHub Actions format) + error_count=$(grep "::error" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") + warning_count=$(grep "::warning" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") + # Ensure counts are valid integers, default to 0 if empty + error_count=${error_count:-0} + warning_count=${warning_count:-0} + # Additional safety check - ensure numeric (use case to validate) + case "$error_count" in ''|*[!0-9]*) error_count=0 ;; esac || true + case "$warning_count" in ''|*[!0-9]*) warning_count=0 ;; esac || true + total=$((error_count + warning_count)) || total=0 + + echo "### Yamllint Results for ${name}" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "- **Total Issues**: $total" >> $GITHUB_STEP_SUMMARY + + if [ "$error_count" -gt 0 ]; then + echo "- ❌ **Errors**: $error_count" >> $GITHUB_STEP_SUMMARY + fi + if [ "$warning_count" -gt 0 ]; then + echo "- âš ī¸ **Warnings**: $warning_count" >> $GITHUB_STEP_SUMMARY + fi + + if [ "$total" -gt 0 ]; then + echo "" >> $GITHUB_STEP_SUMMARY + echo "âš ī¸ **Please review the Yamllint report artifact and consider fixing the issues.**" >> $GITHUB_STEP_SUMMARY + else + echo "" >> $GITHUB_STEP_SUMMARY + echo "✅ **No issues found!**" >> $GITHUB_STEP_SUMMARY + fi + fi + shell: bash + + - name: Upload Yamllint report + if: always() + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: yamllint-report-${{ inputs.name }} + path: ${{ inputs.output-file }} + + - name: Run ReviewDog (Yamllint) + 
if: ${{ inputs.enable-reviewdog == 'true' }} + uses: reviewdog/action-yamllint@f01d8a48fd8d89f89895499fca2cff09f9e9e8c0 # 1.21.0 + with: + github_token: ${{ inputs.github_token }} + reporter: github-pr-review + level: warning + + - name: Fail if Yamllint found issues + if: inputs.fail-on-findings == 'true' + shell: bash + env: + output_file: ${{ inputs.output-file }} + run: | + if [ -f "${output_file}" ]; then + issue_count=$(grep -E "::error|::warning" "${output_file}" 2>/dev/null | wc -l | tr -d '[:space:]' || echo "0") + if [ "$issue_count" -gt 0 ]; then + echo "❌ Yamllint found $issue_count issue(s). Failing the job." + exit 1 + fi + fi diff --git a/.github/workflows/dls-build-and-test-deb_pkgs-and-deb_imgs.yaml b/.github/workflows/dls-build-and-test-deb_pkgs-and-deb_imgs.yaml index dc118ae9..94860b44 100644 --- a/.github/workflows/dls-build-and-test-deb_pkgs-and-deb_imgs.yaml +++ b/.github/workflows/dls-build-and-test-deb_pkgs-and-deb_imgs.yaml @@ -1,328 +1,328 @@ -name: "[DLS] [U22/24] Build and test .deb pkgs & deb imgs" -run-name: "[DLS] [U22/24] Build and test .deb pkgs & deb imgs (by ${{ github.actor }})" -on: - workflow_call: - inputs: - test-repo-branch: - description: "Branch in dl-streamer-tests repo (default is main)" - required: false - type: string - default: "main" - workflow_dispatch: - inputs: - test-repo-branch: - description: "Branch in dl-streamer-tests repo (default is main)" - required: false - type: string - default: "main" -permissions: {} -env: - dlstreamer-version: "2025.2.0" - MODELS_PATH: "$HOME/models" - VIDEO_INPUTS_PATH: "$HOME/videos" - DLS_REL_PATH: "./dlstreamer-repo" - -jobs: - build: - name: "[${{ matrix.runner_print_label }}] Build and test ${{ matrix.ubuntu_version }} .debs & deb imgs" - runs-on: ${{ matrix.runner_labels }} - permissions: - contents: read - packages: read - strategy: - fail-fast: false - matrix: - include: - - path_dockerfile: $DLS_REL_PATH/docker/ubuntu/ubuntu22.Dockerfile - ubuntu_version: ubuntu22 - 
runner_labels: [self-hosted, dlstreamer, TGL, ubuntu22] # Build and run tests on Tiger Lake system with Ubuntu 22 - runner_print_label: TGL - - path_dockerfile: $DLS_REL_PATH/docker/ubuntu/ubuntu24.Dockerfile - ubuntu_version: ubuntu24 - runner_labels: [self-hosted, dlstreamer, TGL, ubuntu24] # Build and run tests on Tiger Lake system with Ubuntu 24 - runner_print_label: TGL - - path_dockerfile: $DLS_REL_PATH/docker/ubuntu/ubuntu24.Dockerfile - ubuntu_version: ubuntu24 - runner_labels: [self-hosted, dlstreamer, ARL, ubuntu24] # Build and run tests on Arrow Lake system with Ubuntu 24 - runner_print_label: ARL - steps: - - name: Initial environment clean - run: | - sudo rm -rf dlstreamer-repo - - - name: Check out dlstreamer repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 - with: - persist-credentials: false - path: dlstreamer-repo - - - name: Init submodules - run: | - cd dlstreamer-repo - git submodule update --init thirdparty/spdlog - - - name: Check out test repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #4.2.2 - with: - repository: open-edge-platform/dl-streamer-tests - persist-credentials: false - ref: ${{ inputs.test-repo-branch }} - path: dl-streamer-tests-repo - - - name: Copy DL Streamer tests repo - run: | - cp -r dl-streamer-tests-repo/functional_tests $PWD/$DLS_REL_PATH/tests - - - # ======================================================== BUILDING PART ======================================================== - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 #3.11.1 - - - name: Log in to GitHub Container Registry - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef #3.6.0 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: đŸšĸ Build deb final img with cache from GHCR - env: - deb_final_img: ghcr.io/${{ github.repository }}/deb-final-img-${{ 
matrix.ubuntu_version }}:${{ github.sha }} - deb_final_img_cached: ghcr.io/${{ github.repository }}/deb-final-img-${{ matrix.ubuntu_version }}:buildcache - run: | - docker buildx build \ - --load \ - --target dlstreamer \ - --tag "${deb_final_img}" \ - --cache-from="${deb_final_img_cached}" \ - --build-arg DLSTREAMER_VERSION=${{ env.dlstreamer-version }} \ - --build-arg DLSTREAMER_BUILD_NUMBER=deb-pkg-${{ matrix.ubuntu_version }} \ - -f ${{ matrix.path_dockerfile }} \ - ${{ env.DLS_REL_PATH }} - - - name: Build dlstreamer img with python - env: - deb_final_img: ghcr.io/${{ github.repository }}/deb-final-img-${{ matrix.ubuntu_version }}:${{ github.sha }} - deb_final_testing_img: ghcr.io/${{ github.repository }}/deb-final-testing-img-${{ matrix.ubuntu_version }}:${{ github.sha }} - id: dlstreamer-with-python - run: | - docker build \ - -f $DLS_REL_PATH/docker/ubuntu/ubuntu-testing.Dockerfile \ - -t $deb_final_testing_img \ - --build-arg BASE_IMAGE=$deb_final_img \ - ${{ env.DLS_REL_PATH }} - - - name: đŸ“Ļ Extract .deb packages using script - env: - deb_final_img: ghcr.io/${{ github.repository }}/deb-final-img-${{ matrix.ubuntu_version }}:${{ github.sha }} - run: | - chmod +x ${{ env.DLS_REL_PATH }}/scripts/extract_and_verify_debs.sh - ${{ env.DLS_REL_PATH }}/scripts/extract_and_verify_debs.sh ${deb_final_img} - ls - cp -r deb_packages ${{ env.DLS_REL_PATH }} - - - # ======================================================== DOCKER TESTING PART ======================================================== - - name: Check models - run: | - echo "## 📂 ${{ matrix.runner_print_label }} Environment checks" >> $GITHUB_STEP_SUMMARY - echo "Test repo branch: $test_repo_branch" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - if [ -d "${{ env.MODELS_PATH }}" ] && [ "$(ls -A "${{ env.MODELS_PATH }}")" ]; then - echo "Models: found ✅" >> $GITHUB_STEP_SUMMARY - else - echo "Models: folder not found or it is empty ❌" >> $GITHUB_STEP_SUMMARY - exit 1 - fi - - - name: Check 
videos - run: | - if [ -d "${{ env.VIDEO_INPUTS_PATH }}" ] && [ "$(ls -A "${{ env.VIDEO_INPUTS_PATH }}")" ]; then - echo "Tests input videos: found ✅" >> $GITHUB_STEP_SUMMARY - else - echo "Tests input videos: folder not found or it is empty ❌" >> $GITHUB_STEP_SUMMARY - exit 1 - fi - - - name: Init docker tests configurations - run: | - echo "## đŸšĸ ${{ matrix.runner_print_label }} Docker tests summary" >> $GITHUB_STEP_SUMMARY - if [[ ${{ matrix.runner_print_label }} == "TGL" ]]; then - echo "DOCKER_TEST_CONFIGS=docker/aliveness_TGL.json" >> $GITHUB_ENV - echo "Selected test configuration file: docker/aliveness_TGL.json" >> $GITHUB_STEP_SUMMARY - elif [[ ${{ matrix.runner_print_label }} == "ARL" ]]; then - echo "DOCKER_TEST_CONFIGS=docker/aliveness_ARL.json" >> $GITHUB_ENV - echo "Selected test configuration file: docker/aliveness_ARL.json" >> $GITHUB_STEP_SUMMARY - else - echo "Test configuration file: not found ❌" >> $GITHUB_STEP_SUMMARY - echo "❌ Cannot assign correct test configuration JSON file for runner based on label: ${{ matrix.runner_print_label }}" - echo "Please review and fix! Exiting..." 
- exit 1 - fi - - - name: 🚀 Run Docker tests - id: run_docker_tests - env: - deb_final_testing_img: ghcr.io/${{ github.repository }}/deb-final-testing-img-${{ matrix.ubuntu_version }}:${{ github.sha }} - DOCKER_TEST_CONFIGS: ${{ env.DOCKER_TEST_CONFIGS }} - run: | - cd $PWD/$DLS_REL_PATH/tests - mkdir functional_tests_results - cd functional_tests_results - DLS_TESTS_RESULTS_PATH=$PWD - echo "DLS_TESTS_RESULTS_PATH=$DLS_TESTS_RESULTS_PATH" >> $GITHUB_ENV - cd ../functional_tests - - ./run_tests.sh \ - --models-path=${{ env.MODELS_PATH }} \ - --video-examples-path=${{ env.VIDEO_INPUTS_PATH }} \ - --test-configs="$DOCKER_TEST_CONFIGS" \ - --results-path="$DLS_TESTS_RESULTS_PATH" \ - --report-name="DLS_functional_tests_${{ matrix.runner_print_label }}_docker_${{ matrix.ubuntu_version }}_results" \ - --image-name="${deb_final_testing_img}" - - - name: Upload Docker test results - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 #4.6.2 - if: steps.run_docker_tests.outcome == 'success' - with: - name: DLS_functional_tests_${{ matrix.runner_print_label }}_docker_${{ matrix.ubuntu_version }}_results - path: "${{ env.DLS_TESTS_RESULTS_PATH }}" - - - name: Print Docker tests summary in workflow - if: steps.run_docker_tests.outcome == 'success' - env: - dls_tests_results_path: ${{ env.DLS_TESTS_RESULTS_PATH }} - test_repo_branch: ${{ inputs.test-repo-branch }} - run: | - sed 's/\[pass\]/:white_check_mark:/g; s/\[\! 
FAIL \!\]/:x:/g' "${dls_tests_results_path}"/DLS_functional_tests_${{ matrix.runner_print_label }}_docker_${{ matrix.ubuntu_version }}_results.txt >> $GITHUB_STEP_SUMMARY - - - name: Clean up tests results - if: always () - env: - DLS_TESTS_RESULTS_PATH: ${{ env.DLS_TESTS_RESULTS_PATH }} - run: | - rm -rf "$DLS_TESTS_RESULTS_PATH" - - - # ======================================================== OH HOST TESTING PART ======================================================== - - name: Link DL Streamer to home directory - run: | - ln -s $PWD/$DLS_REL_PATH $HOME - - - name: Install DL Streamer on-host - run: | - $DLS_REL_PATH/tests/scripts/installation-on-host-entrypoint.sh $DLS_REL_PATH/deb_packages - - - name: 👋 Test hello_dlstreamer script on host - id: test_hello_dlstreamer_script - run: | - mkdir -p $DLS_REL_PATH/test_hello_dlstreamer - cd $DLS_REL_PATH/test_hello_dlstreamer - export MODELS_PATH=${{ env.MODELS_PATH }} - echo "Running hello_dlstreamer.sh on CPU with model yolo11s" - /opt/intel/dlstreamer/scripts/hello_dlstreamer.sh --output=file --device=CPU --model=yolo11s --precision=INT8 - OUTPUT_FILE_FOUND=0 - echo "## đŸ’ģ ${{ matrix.runner_print_label }} On host tests summary" >> $GITHUB_STEP_SUMMARY - for file in *.mp4; do - # Check if the file exists and is at least 1kB in size - if [[ -f "$file" && $(stat --printf="%s" "$file") -ge 1024 ]]; then - echo "✅ Valid .mp4 file found: $file" - echo "Testing hello_dlstreamer.sh script: PASS ✅" >> $GITHUB_STEP_SUMMARY - OUTPUT_FILE_FOUND=1 - fi - done - if [[ $OUTPUT_FILE_FOUND -eq 0 ]]; then - echo "❌ No .mp4 file(s) found or it has less than 1kB size. Exiting..." 
- echo "Testing hello_dlstreamer.sh script: FAIL ❌" >> $GITHUB_STEP_SUMMARY - exit 1 - fi - - - name: Upload hello_dlstreamer output video - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 #4.6.2 - if: steps.test_hello_dlstreamer_script.outcome == 'success' - with: - name: DLS_hello_dlstreamer_output_video_${{ matrix.runner_print_label }}_${{ matrix.ubuntu_version }}_CPU_yolo11s - path: ${{ env.DLS_REL_PATH }}/test_hello_dlstreamer - - - name: Init on host tests configurations - run: | - if [[ ${{ matrix.runner_print_label }} == "TGL" ]]; then - echo "HOST_TEST_CONFIGS=on_host/samples_TGL.json" >> $GITHUB_ENV - echo "Selected test configuration file: on_host/samples_TGL.json" >> $GITHUB_STEP_SUMMARY - elif [[ ${{ matrix.runner_print_label }} == "ARL" ]]; then - echo "HOST_TEST_CONFIGS=on_host/samples_ARL.json" >> $GITHUB_ENV - echo "Selected test configuration file: on_host/samples_ARL.json" >> $GITHUB_STEP_SUMMARY - else - echo "Test configuration file: not found ❌" >> $GITHUB_STEP_SUMMARY - echo "❌ Cannot assign correct test configuration JSON file for runner based on label: ${{ matrix.runner_print_label }}" - echo "Please review and fix! Exiting..." 
- exit 1 - fi - - - name: Init host test environemtnt - run: | - sudo apt install -y libcairo2-dev libgirepository1.0-dev - if [[ ${{ matrix.ubuntu_version }} == "ubuntu24" ]]; then - sudo apt install -y libopencv-dev - fi - if [ -d "$HOME/.virtualenvs" ]; then - echo "Directory $HOME/.virtualenvs exists - removing it" - rm -rf $HOME/.virtualenvs - fi - mkdir -p $HOME/.virtualenvs/dlstreamer - python3 -m venv $HOME/.virtualenvs/dlstreamer - $HOME/.virtualenvs/dlstreamer/bin/pip install --no-cache-dir --upgrade pip -r $DLS_REL_PATH/requirements.txt - - - name: 🚀 Run on host tests - id: run_onhost_tests - env: - HOST_TEST_CONFIGS: ${{ env.HOST_TEST_CONFIGS }} - run: | - cd $PWD/$DLS_REL_PATH/tests - mkdir functional_tests_results - cd functional_tests_results - DLS_TESTS_RESULTS_PATH=$PWD - echo "DLS_TESTS_RESULTS_PATH=$DLS_TESTS_RESULTS_PATH" >> $GITHUB_ENV - cd ../functional_tests - - source $HOME/.virtualenvs/dlstreamer/bin/activate - ./run_tests.sh \ - --models-path=${{ env.MODELS_PATH }} \ - --video-examples-path=${{ env.VIDEO_INPUTS_PATH }} \ - --test-configs="$HOST_TEST_CONFIGS" \ - --results-path="$DLS_TESTS_RESULTS_PATH" \ - --report-name="DLS_functional_tests_${{ matrix.runner_print_label }}_on_host_${{ matrix.ubuntu_version }}_results" \ - --on-host - deactivate - - - name: Upload host test results - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 #4.6.2 - if: steps.run_onhost_tests.outcome == 'success' - with: - name: DLS_functional_tests_${{ matrix.runner_print_label }}_on_host_${{ matrix.ubuntu_version }}_results - path: "${{ env.DLS_TESTS_RESULTS_PATH }}" - - - name: Print host tests summary in workflow - if: steps.run_onhost_tests.outcome == 'success' - env: - dls_tests_results_path: ${{ env.DLS_TESTS_RESULTS_PATH }} - test_repo_branch: ${{ inputs.test-repo-branch }} - run: | - sed 's/\[pass\]/:white_check_mark:/g; s/\[\! 
FAIL \!\]/:x:/g' "${dls_tests_results_path}"/DLS_functional_tests_${{ matrix.runner_print_label }}_on_host_${{ matrix.ubuntu_version }}_results.txt >> $GITHUB_STEP_SUMMARY - - - name: Uninstall dlstreamer - if: always () - run: | - $DLS_REL_PATH/tests/scripts/uninstall-dlstreamer.sh - - # ======================================================== CLEAUP PART ======================================================== - - name: Clean up - env: - deb_final_img: ghcr.io/${{ github.repository }}/deb-final-img-${{ matrix.ubuntu_version }}:${{ github.sha }} - if: always() - run: | - rm -rf $HOME/.virtualenvs - docker rmi ${deb_final_img} || true - sudo rm -f $HOME/dlstreamer-repo - rm -rf dlstreamer-repo - rm -rf $HOME/dl-streamer-tests-repo +name: "[DLS] [U22/24] Build and test .deb pkgs & deb imgs" +run-name: "[DLS] [U22/24] Build and test .deb pkgs & deb imgs (by ${{ github.actor }})" +on: + workflow_call: + inputs: + test-repo-branch: + description: "Branch in dl-streamer-tests repo (default is main)" + required: false + type: string + default: "main" + workflow_dispatch: + inputs: + test-repo-branch: + description: "Branch in dl-streamer-tests repo (default is main)" + required: false + type: string + default: "main" +permissions: {} +env: + dlstreamer-version: "2025.2.0" + MODELS_PATH: "$HOME/models" + VIDEO_INPUTS_PATH: "$HOME/videos" + DLS_REL_PATH: "./dlstreamer-repo" + +jobs: + build: + name: "[${{ matrix.runner_print_label }}] Build and test ${{ matrix.ubuntu_version }} .debs & deb imgs" + runs-on: ${{ matrix.runner_labels }} + permissions: + contents: read + packages: read + strategy: + fail-fast: false + matrix: + include: + - path_dockerfile: $DLS_REL_PATH/docker/ubuntu/ubuntu22.Dockerfile + ubuntu_version: ubuntu22 + runner_labels: [self-hosted, dlstreamer, TGL, ubuntu22] # Build and run tests on Tiger Lake system with Ubuntu 22 + runner_print_label: TGL + - path_dockerfile: $DLS_REL_PATH/docker/ubuntu/ubuntu24.Dockerfile + ubuntu_version: ubuntu24 + 
runner_labels: [self-hosted, dlstreamer, TGL, ubuntu24] # Build and run tests on Tiger Lake system with Ubuntu 24 + runner_print_label: TGL + - path_dockerfile: $DLS_REL_PATH/docker/ubuntu/ubuntu24.Dockerfile + ubuntu_version: ubuntu24 + runner_labels: [self-hosted, dlstreamer, ARL, ubuntu24] # Build and run tests on Arrow Lake system with Ubuntu 24 + runner_print_label: ARL + steps: + - name: Initial environment clean + run: | + sudo rm -rf dlstreamer-repo + + - name: Check out dlstreamer repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 + with: + persist-credentials: false + path: dlstreamer-repo + + - name: Init submodules + run: | + cd dlstreamer-repo + git submodule update --init thirdparty/spdlog + + - name: Check out test repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #4.2.2 + with: + repository: open-edge-platform/dl-streamer-tests + persist-credentials: false + ref: ${{ inputs.test-repo-branch }} + path: dl-streamer-tests-repo + + - name: Copy DL Streamer tests repo + run: | + cp -r dl-streamer-tests-repo/functional_tests $PWD/$DLS_REL_PATH/tests + + + # ======================================================== BUILDING PART ======================================================== + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 #3.11.1 + + - name: Log in to GitHub Container Registry + uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef #3.6.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: đŸšĸ Build deb final img with cache from GHCR + env: + deb_final_img: ghcr.io/${{ github.repository }}/deb-final-img-${{ matrix.ubuntu_version }}:${{ github.sha }} + deb_final_img_cached: ghcr.io/${{ github.repository }}/deb-final-img-${{ matrix.ubuntu_version }}:buildcache + run: | + docker buildx build \ + --load \ + --target dlstreamer \ + --tag "${deb_final_img}" \ + 
--cache-from="${deb_final_img_cached}" \ + --build-arg DLSTREAMER_VERSION=${{ env.dlstreamer-version }} \ + --build-arg DLSTREAMER_BUILD_NUMBER=deb-pkg-${{ matrix.ubuntu_version }} \ + -f ${{ matrix.path_dockerfile }} \ + ${{ env.DLS_REL_PATH }} + + - name: Build dlstreamer img with python + env: + deb_final_img: ghcr.io/${{ github.repository }}/deb-final-img-${{ matrix.ubuntu_version }}:${{ github.sha }} + deb_final_testing_img: ghcr.io/${{ github.repository }}/deb-final-testing-img-${{ matrix.ubuntu_version }}:${{ github.sha }} + id: dlstreamer-with-python + run: | + docker build \ + -f $DLS_REL_PATH/docker/ubuntu/ubuntu-testing.Dockerfile \ + -t $deb_final_testing_img \ + --build-arg BASE_IMAGE=$deb_final_img \ + ${{ env.DLS_REL_PATH }} + + - name: đŸ“Ļ Extract .deb packages using script + env: + deb_final_img: ghcr.io/${{ github.repository }}/deb-final-img-${{ matrix.ubuntu_version }}:${{ github.sha }} + run: | + chmod +x ${{ env.DLS_REL_PATH }}/scripts/extract_and_verify_debs.sh + ${{ env.DLS_REL_PATH }}/scripts/extract_and_verify_debs.sh ${deb_final_img} + ls + cp -r deb_packages ${{ env.DLS_REL_PATH }} + + + # ======================================================== DOCKER TESTING PART ======================================================== + - name: Check models + run: | + echo "## 📂 ${{ matrix.runner_print_label }} Environment checks" >> $GITHUB_STEP_SUMMARY + echo "Test repo branch: $test_repo_branch" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + if [ -d "${{ env.MODELS_PATH }}" ] && [ "$(ls -A "${{ env.MODELS_PATH }}")" ]; then + echo "Models: found ✅" >> $GITHUB_STEP_SUMMARY + else + echo "Models: folder not found or it is empty ❌" >> $GITHUB_STEP_SUMMARY + exit 1 + fi + + - name: Check videos + run: | + if [ -d "${{ env.VIDEO_INPUTS_PATH }}" ] && [ "$(ls -A "${{ env.VIDEO_INPUTS_PATH }}")" ]; then + echo "Tests input videos: found ✅" >> $GITHUB_STEP_SUMMARY + else + echo "Tests input videos: folder not found or it is empty ❌" >> 
$GITHUB_STEP_SUMMARY + exit 1 + fi + + - name: Init docker tests configurations + run: | + echo "## đŸšĸ ${{ matrix.runner_print_label }} Docker tests summary" >> $GITHUB_STEP_SUMMARY + if [[ ${{ matrix.runner_print_label }} == "TGL" ]]; then + echo "DOCKER_TEST_CONFIGS=docker/aliveness_TGL.json" >> $GITHUB_ENV + echo "Selected test configuration file: docker/aliveness_TGL.json" >> $GITHUB_STEP_SUMMARY + elif [[ ${{ matrix.runner_print_label }} == "ARL" ]]; then + echo "DOCKER_TEST_CONFIGS=docker/aliveness_ARL.json" >> $GITHUB_ENV + echo "Selected test configuration file: docker/aliveness_ARL.json" >> $GITHUB_STEP_SUMMARY + else + echo "Test configuration file: not found ❌" >> $GITHUB_STEP_SUMMARY + echo "❌ Cannot assign correct test configuration JSON file for runner based on label: ${{ matrix.runner_print_label }}" + echo "Please review and fix! Exiting..." + exit 1 + fi + + - name: 🚀 Run Docker tests + id: run_docker_tests + env: + deb_final_testing_img: ghcr.io/${{ github.repository }}/deb-final-testing-img-${{ matrix.ubuntu_version }}:${{ github.sha }} + DOCKER_TEST_CONFIGS: ${{ env.DOCKER_TEST_CONFIGS }} + run: | + cd $PWD/$DLS_REL_PATH/tests + mkdir functional_tests_results + cd functional_tests_results + DLS_TESTS_RESULTS_PATH=$PWD + echo "DLS_TESTS_RESULTS_PATH=$DLS_TESTS_RESULTS_PATH" >> $GITHUB_ENV + cd ../functional_tests + + ./run_tests.sh \ + --models-path=${{ env.MODELS_PATH }} \ + --video-examples-path=${{ env.VIDEO_INPUTS_PATH }} \ + --test-configs="$DOCKER_TEST_CONFIGS" \ + --results-path="$DLS_TESTS_RESULTS_PATH" \ + --report-name="DLS_functional_tests_${{ matrix.runner_print_label }}_docker_${{ matrix.ubuntu_version }}_results" \ + --image-name="${deb_final_testing_img}" + + - name: Upload Docker test results + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 #4.6.2 + if: steps.run_docker_tests.outcome == 'success' + with: + name: DLS_functional_tests_${{ matrix.runner_print_label }}_docker_${{ matrix.ubuntu_version 
}}_results + path: "${{ env.DLS_TESTS_RESULTS_PATH }}" + + - name: Print Docker tests summary in workflow + if: steps.run_docker_tests.outcome == 'success' + env: + dls_tests_results_path: ${{ env.DLS_TESTS_RESULTS_PATH }} + test_repo_branch: ${{ inputs.test-repo-branch }} + run: | + sed 's/\[pass\]/:white_check_mark:/g; s/\[\! FAIL \!\]/:x:/g' "${dls_tests_results_path}"/DLS_functional_tests_${{ matrix.runner_print_label }}_docker_${{ matrix.ubuntu_version }}_results.txt >> $GITHUB_STEP_SUMMARY + + - name: Clean up tests results + if: always() + env: + DLS_TESTS_RESULTS_PATH: ${{ env.DLS_TESTS_RESULTS_PATH }} + run: | + rm -rf "$DLS_TESTS_RESULTS_PATH" + + + # ======================================================== ON HOST TESTING PART ======================================================== + - name: Link DL Streamer to home directory + run: | + ln -s $PWD/$DLS_REL_PATH $HOME + + - name: Install DL Streamer on-host + run: | + $DLS_REL_PATH/tests/scripts/installation-on-host-entrypoint.sh $DLS_REL_PATH/deb_packages + + - name: 👋 Test hello_dlstreamer script on host + id: test_hello_dlstreamer_script + run: | + mkdir -p $DLS_REL_PATH/test_hello_dlstreamer + cd $DLS_REL_PATH/test_hello_dlstreamer + export MODELS_PATH=${{ env.MODELS_PATH }} + echo "Running hello_dlstreamer.sh on CPU with model yolo11s" + /opt/intel/dlstreamer/scripts/hello_dlstreamer.sh --output=file --device=CPU --model=yolo11s --precision=INT8 + OUTPUT_FILE_FOUND=0 + echo "## đŸ’ģ ${{ matrix.runner_print_label }} On host tests summary" >> $GITHUB_STEP_SUMMARY + for file in *.mp4; do + # Check if the file exists and is at least 1kB in size + if [[ -f "$file" && $(stat --printf="%s" "$file") -ge 1024 ]]; then + echo "✅ Valid .mp4 file found: $file" + echo "Testing hello_dlstreamer.sh script: PASS ✅" >> $GITHUB_STEP_SUMMARY + OUTPUT_FILE_FOUND=1 + fi + done + if [[ $OUTPUT_FILE_FOUND -eq 0 ]]; then + echo "❌ No .mp4 file(s) found or it has less than 1kB size. Exiting..." 
+ echo "Testing hello_dlstreamer.sh script: FAIL ❌" >> $GITHUB_STEP_SUMMARY + exit 1 + fi + + - name: Upload hello_dlstreamer output video + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 #4.6.2 + if: steps.test_hello_dlstreamer_script.outcome == 'success' + with: + name: DLS_hello_dlstreamer_output_video_${{ matrix.runner_print_label }}_${{ matrix.ubuntu_version }}_CPU_yolo11s + path: ${{ env.DLS_REL_PATH }}/test_hello_dlstreamer + + - name: Init on host tests configurations + run: | + if [[ ${{ matrix.runner_print_label }} == "TGL" ]]; then + echo "HOST_TEST_CONFIGS=on_host/samples_TGL.json" >> $GITHUB_ENV + echo "Selected test configuration file: on_host/samples_TGL.json" >> $GITHUB_STEP_SUMMARY + elif [[ ${{ matrix.runner_print_label }} == "ARL" ]]; then + echo "HOST_TEST_CONFIGS=on_host/samples_ARL.json" >> $GITHUB_ENV + echo "Selected test configuration file: on_host/samples_ARL.json" >> $GITHUB_STEP_SUMMARY + else + echo "Test configuration file: not found ❌" >> $GITHUB_STEP_SUMMARY + echo "❌ Cannot assign correct test configuration JSON file for runner based on label: ${{ matrix.runner_print_label }}" + echo "Please review and fix! Exiting..." 
+ exit 1 + fi + + - name: Init host test environment + run: | + sudo apt install -y libcairo2-dev libgirepository1.0-dev + if [[ ${{ matrix.ubuntu_version }} == "ubuntu24" ]]; then + sudo apt install -y libopencv-dev + fi + if [ -d "$HOME/.virtualenvs" ]; then + echo "Directory $HOME/.virtualenvs exists - removing it" + rm -rf $HOME/.virtualenvs + fi + mkdir -p $HOME/.virtualenvs/dlstreamer + python3 -m venv $HOME/.virtualenvs/dlstreamer + $HOME/.virtualenvs/dlstreamer/bin/pip install --no-cache-dir --upgrade pip -r $DLS_REL_PATH/requirements.txt + + - name: 🚀 Run on host tests + id: run_onhost_tests + env: + HOST_TEST_CONFIGS: ${{ env.HOST_TEST_CONFIGS }} + run: | + cd $PWD/$DLS_REL_PATH/tests + mkdir functional_tests_results + cd functional_tests_results + DLS_TESTS_RESULTS_PATH=$PWD + echo "DLS_TESTS_RESULTS_PATH=$DLS_TESTS_RESULTS_PATH" >> $GITHUB_ENV + cd ../functional_tests + + source $HOME/.virtualenvs/dlstreamer/bin/activate + ./run_tests.sh \ + --models-path=${{ env.MODELS_PATH }} \ + --video-examples-path=${{ env.VIDEO_INPUTS_PATH }} \ + --test-configs="$HOST_TEST_CONFIGS" \ + --results-path="$DLS_TESTS_RESULTS_PATH" \ + --report-name="DLS_functional_tests_${{ matrix.runner_print_label }}_on_host_${{ matrix.ubuntu_version }}_results" \ + --on-host + deactivate + + - name: Upload host test results + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 #4.6.2 + if: steps.run_onhost_tests.outcome == 'success' + with: + name: DLS_functional_tests_${{ matrix.runner_print_label }}_on_host_${{ matrix.ubuntu_version }}_results + path: "${{ env.DLS_TESTS_RESULTS_PATH }}" + + - name: Print host tests summary in workflow + if: steps.run_onhost_tests.outcome == 'success' + env: + dls_tests_results_path: ${{ env.DLS_TESTS_RESULTS_PATH }} + test_repo_branch: ${{ inputs.test-repo-branch }} + run: | + sed 's/\[pass\]/:white_check_mark:/g; s/\[\! 
FAIL \!\]/:x:/g' "${dls_tests_results_path}"/DLS_functional_tests_${{ matrix.runner_print_label }}_on_host_${{ matrix.ubuntu_version }}_results.txt >> $GITHUB_STEP_SUMMARY + + - name: Uninstall dlstreamer + if: always() + run: | + $DLS_REL_PATH/tests/scripts/uninstall-dlstreamer.sh + + # ======================================================== CLEANUP PART ======================================================== + - name: Clean up + env: + deb_final_img: ghcr.io/${{ github.repository }}/deb-final-img-${{ matrix.ubuntu_version }}:${{ github.sha }} + if: always() + run: | + rm -rf $HOME/.virtualenvs + docker rmi ${deb_final_img} || true + sudo rm -f $HOME/dlstreamer-repo + rm -rf dlstreamer-repo + rm -rf $HOME/dl-streamer-tests-repo diff --git a/.github/workflows/dls-build-and-test-windows.yaml b/.github/workflows/dls-build-and-test-windows.yaml index b4f7ee03..734fdc4a 100644 --- a/.github/workflows/dls-build-and-test-windows.yaml +++ b/.github/workflows/dls-build-and-test-windows.yaml @@ -1,125 +1,125 @@ -name: "[DLS] [WIN] Build Windows DLLs" -run-name: "[DLS] [WIN] Build Windows DLLs (by ${{ github.actor }})" -on: - workflow_call: - inputs: - test-repo-branch: - description: "Branch in dl-streamer-tests repo (default is main)" - required: false - type: string - default: "main" - workflow_dispatch: - inputs: - test-repo-branch: - description: "Branch in dl-streamer-tests repo (default is main)" - required: false - type: string - default: "main" -permissions: {} -env: - DLS_TARGET_DIRECTORY: C:\dlstreamer - DLS_REPO_TARGET_DIRECTORY: C:\dlstreamer_repo - - -jobs: - build: - name: "[${{ matrix.runner_print_label }}] Build Windows DLLs" - runs-on: ${{ matrix.runner_labels }} - permissions: - contents: read - strategy: - fail-fast: false - matrix: - include: - - runner_labels: [self-hosted, dlstreamer, ARL, windows] # Build and run tests on Arrow Lake system with Ubuntu 24 - runner_print_label: ARL - steps: - - name: Check out dlstreamer repository - uses: 
actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 - with: - persist-credentials: false - path: dlstreamer-repo - submodules: false - fetch-depth: 1 - - - name: Init submodules - run: | - cd dlstreamer-repo - git submodule update --init thirdparty/spdlog - git submodule update --init thirdparty/googletest - - - name: Copy DL Streamer repo - shell: powershell - run: | - if (Test-Path ${{ env.DLS_REPO_TARGET_DIRECTORY }}) { - Remove-Item -Recurse -Force $target - } - New-Item -ItemType Directory -Path ${{ env.DLS_REPO_TARGET_DIRECTORY }} | Out-Null - Copy-Item -Path "dlstreamer-repo\*" -Destination ${{ env.DLS_REPO_TARGET_DIRECTORY }} -Recurse -Force - - # ======================================================== BUILDING PART ======================================================== - - name: đŸ“Ļ Build DL Streamer DLLs - shell: powershell - run: | - cd ${{ env.DLS_REPO_TARGET_DIRECTORY }} - & ./scripts/build_dlstreamer_dlls.ps1 - - - name: Copy DLLs - shell: powershell - run: | - $target = "${{ env.DLS_TARGET_DIRECTORY }}" - $source = "C:\dlstreamer_tmp\build\intel64\Release\bin" - Write-Host "`nContents of source directory ($source):" - Get-ChildItem -Path $source -Recurse - if (Test-Path $target) { - Remove-Item -Recurse -Force $target - } - - # Copy DLLs - New-Item -ItemType Directory -Path $target | Out-Null - Copy-Item -Path "$source\*" -Destination $target -Recurse -Force - - # Copy script - $scriptSource = "$PWD\dlstreamer-repo\scripts\setup_dls_env.ps1" - $scriptDest = Join-Path $target "setup_dls_env.ps1" - Copy-Item -Path $scriptSource -Destination $scriptDest -Force - - Write-Host "`nContents of target directory after copying ($target):" - Get-ChildItem -Path $target -Recurse - - - name: Print list of DLLs with sizes - shell: powershell - run: | - $files = Get-ChildItem -Path ${{ env.DLS_TARGET_DIRECTORY }} -Recurse -Filter *.dll -File - - $table = $files | ForEach-Object { - "| $($_.Name) | $([math]::Round($_.Length / 1KB, 2)) KB |" - } - 
$count = $files.Count - - $summary = @() - $summary += "### List of built DLL files" - $summary += "" - $summary += "| File | Size |" - $summary += "|------|--------|" - $summary += $table - $summary += "" - $summary += "**Count of files:** $count" - - $summary -join "`n" | Out-File -FilePath $env:GITHUB_STEP_SUMMARY -Encoding utf8 -Append - - # ======================================================== CLEAUP PART ======================================================== - - name: Clean up - if: always() - shell: powershell - run: | - Remove-Item -Path .\dlstreamer-repo\ -Force -Recurse - if (Test-Path "C:\dlstreamer_tmp\build") { - Remove-Item -Recurse -Force "C:\dlstreamer_tmp\build" - } - if (Test-Path ${{ env.DLS_TARGET_DIRECTORY }}) { - Remove-Item -Recurse -Force ${{ env.DLS_TARGET_DIRECTORY }} - } - if (Test-Path ${{ env.DLS_REPO_TARGET_DIRECTORY }}) { - Remove-Item -Recurse -Force ${{ env.DLS_REPO_TARGET_DIRECTORY }} - } +name: "[DLS] [WIN] Build Windows DLLs" +run-name: "[DLS] [WIN] Build Windows DLLs (by ${{ github.actor }})" +on: + workflow_call: + inputs: + test-repo-branch: + description: "Branch in dl-streamer-tests repo (default is main)" + required: false + type: string + default: "main" + workflow_dispatch: + inputs: + test-repo-branch: + description: "Branch in dl-streamer-tests repo (default is main)" + required: false + type: string + default: "main" +permissions: {} +env: + DLS_TARGET_DIRECTORY: C:\dlstreamer + DLS_REPO_TARGET_DIRECTORY: C:\dlstreamer_repo + + +jobs: + build: + name: "[${{ matrix.runner_print_label }}] Build Windows DLLs" + runs-on: ${{ matrix.runner_labels }} + permissions: + contents: read + strategy: + fail-fast: false + matrix: + include: + - runner_labels: [self-hosted, dlstreamer, ARL, windows] # Build and run tests on Arrow Lake system with Ubuntu 24 + runner_print_label: ARL + steps: + - name: Check out dlstreamer repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 + with: + 
persist-credentials: false + path: dlstreamer-repo + submodules: false + fetch-depth: 1 + + - name: Init submodules + run: | + cd dlstreamer-repo + git submodule update --init thirdparty/spdlog + git submodule update --init thirdparty/googletest + + - name: Copy DL Streamer repo + shell: powershell + run: | + if (Test-Path ${{ env.DLS_REPO_TARGET_DIRECTORY }}) { + Remove-Item -Recurse -Force ${{ env.DLS_REPO_TARGET_DIRECTORY }} + } + New-Item -ItemType Directory -Path ${{ env.DLS_REPO_TARGET_DIRECTORY }} | Out-Null + Copy-Item -Path "dlstreamer-repo\*" -Destination ${{ env.DLS_REPO_TARGET_DIRECTORY }} -Recurse -Force + + # ======================================================== BUILDING PART ======================================================== + - name: đŸ“Ļ Build DL Streamer DLLs + shell: powershell + run: | + cd ${{ env.DLS_REPO_TARGET_DIRECTORY }} + & ./scripts/build_dlstreamer_dlls.ps1 + + - name: Copy DLLs + shell: powershell + run: | + $target = "${{ env.DLS_TARGET_DIRECTORY }}" + $source = "C:\dlstreamer_tmp\build\intel64\Release\bin" + Write-Host "`nContents of source directory ($source):" + Get-ChildItem -Path $source -Recurse + if (Test-Path $target) { + Remove-Item -Recurse -Force $target + } + + # Copy DLLs + New-Item -ItemType Directory -Path $target | Out-Null + Copy-Item -Path "$source\*" -Destination $target -Recurse -Force + + # Copy script + $scriptSource = "$PWD\dlstreamer-repo\scripts\setup_dls_env.ps1" + $scriptDest = Join-Path $target "setup_dls_env.ps1" + Copy-Item -Path $scriptSource -Destination $scriptDest -Force + + Write-Host "`nContents of target directory after copying ($target):" + Get-ChildItem -Path $target -Recurse + + - name: Print list of DLLs with sizes + shell: powershell + run: | + $files = Get-ChildItem -Path ${{ env.DLS_TARGET_DIRECTORY }} -Recurse -Filter *.dll -File + + $table = $files | ForEach-Object { + "| $($_.Name) | $([math]::Round($_.Length / 1KB, 2)) KB |" + } + $count = $files.Count + + $summary = @() + $summary += "### List of built DLL 
files" + $summary += "" + $summary += "| File | Size |" + $summary += "|------|--------|" + $summary += $table + $summary += "" + $summary += "**Count of files:** $count" + + $summary -join "`n" | Out-File -FilePath $env:GITHUB_STEP_SUMMARY -Encoding utf8 -Append + + # ======================================================== CLEAUP PART ======================================================== + - name: Clean up + if: always() + shell: powershell + run: | + Remove-Item -Path .\dlstreamer-repo\ -Force -Recurse + if (Test-Path "C:\dlstreamer_tmp\build") { + Remove-Item -Recurse -Force "C:\dlstreamer_tmp\build" + } + if (Test-Path ${{ env.DLS_TARGET_DIRECTORY }}) { + Remove-Item -Recurse -Force ${{ env.DLS_TARGET_DIRECTORY }} + } + if (Test-Path ${{ env.DLS_REPO_TARGET_DIRECTORY }}) { + Remove-Item -Recurse -Force ${{ env.DLS_REPO_TARGET_DIRECTORY }} + } diff --git a/.github/workflows/dls-build-dev-docker-images-and-run-unit.yaml b/.github/workflows/dls-build-dev-docker-images-and-run-unit.yaml index 1ee53738..d67aa4e3 100644 --- a/.github/workflows/dls-build-dev-docker-images-and-run-unit.yaml +++ b/.github/workflows/dls-build-dev-docker-images-and-run-unit.yaml @@ -1,146 +1,146 @@ -name: "[DLS] [U22/24] Build dev imgs and run Unit Tests" -run-name: "[DLS] [U22/24] Build dev imgs and run Unit Tests(by ${{ github.actor }})" -on: - workflow_call: - workflow_dispatch: -permissions: {} -env: - MODELS_PATH: "/home/runner/models" - VIDEO_INPUTS_PATH: "/home/runner/videos" - -jobs: - build: - name: Build dev ${{ matrix.ubuntu_version }} imgs and run unit - runs-on: [dls, ubuntu] - permissions: - contents: read - packages: read - strategy: - fail-fast: false - matrix: - include: - - path_dockerfile: ./dlstreamer-repo/docker/ubuntu/ubuntu22.Dockerfile - ubuntu_version: ubuntu22 - - path_dockerfile: ./dlstreamer-repo/docker/ubuntu/ubuntu24.Dockerfile - ubuntu_version: ubuntu24 - steps: - - name: Clean before - run: | - sudo rm -rf dlstreamer-repo - - - name: Check out 
dlstreamer repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 - with: - persist-credentials: false - path: dlstreamer-repo - - - name: Init submodules - run: | - cd dlstreamer-repo - git submodule update --init thirdparty/spdlog - git submodule update --init thirdparty/googletest - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 #3.11.1 - - - name: Log in to GitHub Container Registry - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef #3.6.0 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Build dev debug img with cache from GHCR - env: - dev_debug_img: ghcr.io/${{ github.repository }}/dev-debug-img-${{ matrix.ubuntu_version }}:${{ github.sha }} - dev_debug_img_cached: ghcr.io/${{ github.repository }}/dev-debug-img-${{ matrix.ubuntu_version }}:buildcache - run: | - docker buildx build \ - --load \ - --target dlstreamer-dev \ - --tag "${dev_debug_img}" \ - --cache-from="${dev_debug_img_cached}" \ - --build-arg BUILD_ARG=Debug \ - -f ${{ matrix.path_dockerfile }} \ - ./dlstreamer-repo - - # ====================================================== UNIT TESTS ============================================================ - - name: Check models - run: | - echo "## Basic checks" >> $GITHUB_STEP_SUMMARY - if [ -d "${{ env.MODELS_PATH }}" ] && [ "$(ls -A "${{ env.MODELS_PATH }}")" ]; then - echo "Models: found ✅" >> $GITHUB_STEP_SUMMARY - ls -A "${{ env.MODELS_PATH }}" - else - echo "Models: folder not found or it is empty ❌" >> $GITHUB_STEP_SUMMARY - ls -A "${{ env.MODELS_PATH }}" - exit 1 - fi - - - name: Check videos - run: | - if [ -d "${{ env.VIDEO_INPUTS_PATH }}" ] && [ "$(ls -A "${{ env.VIDEO_INPUTS_PATH }}")" ]; then - echo "Tests input videos: found ✅" >> $GITHUB_STEP_SUMMARY - else - echo "Tests input videos: folder not found or it is empty ❌" >> $GITHUB_STEP_SUMMARY - exit 1 - fi - - - name: 
Create test results folder - env: - RESULTS_DIR: test-results - run: | - mkdir -p ${RESULTS_DIR} - chmod -R 777 ${RESULTS_DIR} - - - name: Run tests - id: run-tests - env: - dev_debug_img: ghcr.io/${{ github.repository }}/dev-debug-img-${{ matrix.ubuntu_version }}:${{ github.sha }} - MAPPED_MODELS_PATH: /home/dlstreamer/models - MAPPED_RESULTS_DIR: /home/dlstreamer/test-results - MAPPED_VIDEO_EXAMPLES_DIR: /home/dlstreamer/video-examples - RESULTS_DIR: test-results - run: | - docker run --device /dev/dri --rm \ - --group-add=$(stat -c "%g" /dev/dri/render*) \ - -v ${{ env.VIDEO_INPUTS_PATH }}:${MAPPED_VIDEO_EXAMPLES_DIR} \ - -v ${{ env.MODELS_PATH }}:${MAPPED_MODELS_PATH} \ - -v $PWD/${RESULTS_DIR}:${MAPPED_RESULTS_DIR} \ - -v $PWD/dlstreamer-repo/tests/scripts:/home/dlstreamer/dlstreamer/scripts \ - -v $PWD/dlstreamer-repo/tests/unit_tests:/home/dlstreamer/dlstreamer/tests \ - -e VIDEO_EXAMPLES_DIR=${MAPPED_VIDEO_EXAMPLES_DIR} \ - -w /home/dlstreamer/dlstreamer \ - -e MODELS_PATH=${MAPPED_MODELS_PATH} \ - -e MODELS_PROC_PATH=/home/dlstreamer/dlstreamer/samples/gstreamer/model_proc \ - ${dev_debug_img} \ - scripts/run_unit_tests.sh "" "" ${MAPPED_RESULTS_DIR} - - #this script creates output file called unit_test_summary.txt - - name: Run python script to get Unit Test results - env: - RESULTS_DIR: test-results - run: python3 dlstreamer-repo/tests/scripts/unit_test_results.py $PWD/${RESULTS_DIR} - - - name: Add test summary to job summary - if: always () - env: - RESULTS_DIR: test-results - run: | - echo "## Test Summary for Unit Tests on TGL" >> $GITHUB_STEP_SUMMARY - cat $PWD/${RESULTS_DIR}/unit_test_summary.txt >> $GITHUB_STEP_SUMMARY - - - name: Upload test results - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 #4.6.2 - if: always () - env: - RESULTS_DIR: test-results - with: - name: DLS_unit_tests_tgl_${{ matrix.ubuntu_version }} - path: ${{ env.RESULTS_DIR }}/*.xml - - - name: Clean up - if: always () - env: - RESULTS_DIR: 
test-results +name: "[DLS] [U22/24] Build dev imgs and run Unit Tests" +run-name: "[DLS] [U22/24] Build dev imgs and run Unit Tests(by ${{ github.actor }})" +on: + workflow_call: + workflow_dispatch: +permissions: {} +env: + MODELS_PATH: "/home/runner/models" + VIDEO_INPUTS_PATH: "/home/runner/videos" + +jobs: + build: + name: Build dev ${{ matrix.ubuntu_version }} imgs and run unit + runs-on: [dls, ubuntu] + permissions: + contents: read + packages: read + strategy: + fail-fast: false + matrix: + include: + - path_dockerfile: ./dlstreamer-repo/docker/ubuntu/ubuntu22.Dockerfile + ubuntu_version: ubuntu22 + - path_dockerfile: ./dlstreamer-repo/docker/ubuntu/ubuntu24.Dockerfile + ubuntu_version: ubuntu24 + steps: + - name: Clean before + run: | + sudo rm -rf dlstreamer-repo + + - name: Check out dlstreamer repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 + with: + persist-credentials: false + path: dlstreamer-repo + + - name: Init submodules + run: | + cd dlstreamer-repo + git submodule update --init thirdparty/spdlog + git submodule update --init thirdparty/googletest + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 #3.11.1 + + - name: Log in to GitHub Container Registry + uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef #3.6.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build dev debug img with cache from GHCR + env: + dev_debug_img: ghcr.io/${{ github.repository }}/dev-debug-img-${{ matrix.ubuntu_version }}:${{ github.sha }} + dev_debug_img_cached: ghcr.io/${{ github.repository }}/dev-debug-img-${{ matrix.ubuntu_version }}:buildcache + run: | + docker buildx build \ + --load \ + --target dlstreamer-dev \ + --tag "${dev_debug_img}" \ + --cache-from="${dev_debug_img_cached}" \ + --build-arg BUILD_ARG=Debug \ + -f ${{ matrix.path_dockerfile }} \ + ./dlstreamer-repo + + # 
====================================================== UNIT TESTS ============================================================ + - name: Check models + run: | + echo "## Basic checks" >> $GITHUB_STEP_SUMMARY + if [ -d "${{ env.MODELS_PATH }}" ] && [ "$(ls -A "${{ env.MODELS_PATH }}")" ]; then + echo "Models: found ✅" >> $GITHUB_STEP_SUMMARY + ls -A "${{ env.MODELS_PATH }}" + else + echo "Models: folder not found or it is empty ❌" >> $GITHUB_STEP_SUMMARY + ls -A "${{ env.MODELS_PATH }}" + exit 1 + fi + + - name: Check videos + run: | + if [ -d "${{ env.VIDEO_INPUTS_PATH }}" ] && [ "$(ls -A "${{ env.VIDEO_INPUTS_PATH }}")" ]; then + echo "Tests input videos: found ✅" >> $GITHUB_STEP_SUMMARY + else + echo "Tests input videos: folder not found or it is empty ❌" >> $GITHUB_STEP_SUMMARY + exit 1 + fi + + - name: Create test results folder + env: + RESULTS_DIR: test-results + run: | + mkdir -p ${RESULTS_DIR} + chmod -R 777 ${RESULTS_DIR} + + - name: Run tests + id: run-tests + env: + dev_debug_img: ghcr.io/${{ github.repository }}/dev-debug-img-${{ matrix.ubuntu_version }}:${{ github.sha }} + MAPPED_MODELS_PATH: /home/dlstreamer/models + MAPPED_RESULTS_DIR: /home/dlstreamer/test-results + MAPPED_VIDEO_EXAMPLES_DIR: /home/dlstreamer/video-examples + RESULTS_DIR: test-results + run: | + docker run --device /dev/dri --rm \ + --group-add=$(stat -c "%g" /dev/dri/render*) \ + -v ${{ env.VIDEO_INPUTS_PATH }}:${MAPPED_VIDEO_EXAMPLES_DIR} \ + -v ${{ env.MODELS_PATH }}:${MAPPED_MODELS_PATH} \ + -v $PWD/${RESULTS_DIR}:${MAPPED_RESULTS_DIR} \ + -v $PWD/dlstreamer-repo/tests/scripts:/home/dlstreamer/dlstreamer/scripts \ + -v $PWD/dlstreamer-repo/tests/unit_tests:/home/dlstreamer/dlstreamer/tests \ + -e VIDEO_EXAMPLES_DIR=${MAPPED_VIDEO_EXAMPLES_DIR} \ + -w /home/dlstreamer/dlstreamer \ + -e MODELS_PATH=${MAPPED_MODELS_PATH} \ + -e MODELS_PROC_PATH=/home/dlstreamer/dlstreamer/samples/gstreamer/model_proc \ + ${dev_debug_img} \ + scripts/run_unit_tests.sh "" "" ${MAPPED_RESULTS_DIR} + + 
#this script creates output file called unit_test_summary.txt + - name: Run python script to get Unit Test results + env: + RESULTS_DIR: test-results + run: python3 dlstreamer-repo/tests/scripts/unit_test_results.py $PWD/${RESULTS_DIR} + + - name: Add test summary to job summary + if: always() + env: + RESULTS_DIR: test-results + run: | + echo "## Test Summary for Unit Tests on TGL" >> $GITHUB_STEP_SUMMARY + cat $PWD/${RESULTS_DIR}/unit_test_summary.txt >> $GITHUB_STEP_SUMMARY + + - name: Upload test results + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 #4.6.2 + if: always() + env: + RESULTS_DIR: test-results + with: + name: DLS_unit_tests_tgl_${{ matrix.ubuntu_version }} + path: ${{ env.RESULTS_DIR }}/*.xml + + - name: Clean up + if: always() + env: + RESULTS_DIR: test-results run: sudo rm -rf ${RESULTS_DIR} dlstreamer-repo \ No newline at end of file diff --git a/.github/workflows/dls-build-documentation.yaml b/.github/workflows/dls-build-documentation.yaml index 9b130391..f40a3e9f 100644 --- a/.github/workflows/dls-build-documentation.yaml +++ b/.github/workflows/dls-build-documentation.yaml @@ -1,103 +1,103 @@ -name: "[DLS] Documentation PR workflow" -run-name: "[DLS] Documentation PR workflow (by @${{ github.actor }} via ${{ github.event_name }})" -on: - push: - branches: - - 'main' - paths: - - 'docs/**' - pull_request: - paths: - - 'docs/**' -permissions: {} - -jobs: - build-docs: - name: Build DL Streamer documentation - runs-on: ubuntu-latest - permissions: - contents: read - steps: - - name: Check out dlstreamer repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 - with: - persist-credentials: false - - - name: Run build script - run: | - ./docs/build_html.sh "actions-gh-pages:latest" gh-pages - - - name: Documentation build summary - if: always() - run: | - summary_content="" - - # Check if index.html for documentation exists - index_file_path='./docs/build-html/index.html' - if [ -f 
"$index_file_path" ]; then - result="Documentation built: YES :white_check_mark:" - else - result="Documentation built: NO :x:" - fi - echo "$result" - summary_content+="$result\n" - - # Check broken links in linkcheck - linkcheck_file_path='./docs/build-linkcheck/output.txt' - broken_count=$(grep -o '\[broken\]' "$linkcheck_file_path" | wc -l) - if [ "$broken_count" -eq 0 ]; then - result="Broken links: $broken_count :white_check_mark:" - else - result="Broken links: $broken_count :x:" - fi - echo "$result" - summary_content+="$result\n" - - # Spelling - result="Spelling: check report from artifacts" - echo "$result" - summary_content+="$result\n" - - echo "### Building with Sphinx results" >> $GITHUB_STEP_SUMMARY - echo -e "$summary_content" >> $GITHUB_STEP_SUMMARY - - - name: Check specific links - if: always() - run: | - cd ./docs - python3 ./scripts/specific_links_checker.py 2>&1 | tee specific_links_checker_report.txt - summary=$(grep -E '✅ All links are working\.|❌ [0-9]+ broken link\(s\)' specific_links_checker_report.txt | tail -n 1) - echo "### Extra links checker" >> $GITHUB_STEP_SUMMARY - echo "$summary" >> $GITHUB_STEP_SUMMARY - - - name: Upload linkcheck report - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 #4.6.2 - if: always() - with: - name: docs_linkcheck_report - path: ./docs/build-linkcheck/ - - - name: Upload specific links checker report - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 #4.6.2 - if: always() - with: - name: docs_specific_links_checker_report - path: ./docs/specific_links_checker_report.txt - - - name: Upload spelling report - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 #4.6.2 - if: always() - with: - name: docs_spelling_report - path: ./docs/build-spelling/ - - - name: Upload pages - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 #4.6.2 - if: always() - with: - name: docs_site - path: ./docs/build-html - - - name: Clean up - if: 
always() - run: rm -rf "$GITHUB_WORKSPACE"/* +name: "[DLS] Documentation PR workflow" +run-name: "[DLS] Documentation PR workflow (by @${{ github.actor }} via ${{ github.event_name }})" +on: + push: + branches: + - 'main' + paths: + - 'docs/**' + pull_request: + paths: + - 'docs/**' +permissions: {} + +jobs: + build-docs: + name: Build DL Streamer documentation + runs-on: ubuntu-latest + permissions: + contents: read + steps: + - name: Check out dlstreamer repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 + with: + persist-credentials: false + + - name: Run build script + run: | + ./docs/build_html.sh "actions-gh-pages:latest" gh-pages + + - name: Documentation build summary + if: always() + run: | + summary_content="" + + # Check if index.html for documentation exists + index_file_path='./docs/build-html/index.html' + if [ -f "$index_file_path" ]; then + result="Documentation built: YES :white_check_mark:" + else + result="Documentation built: NO :x:" + fi + echo "$result" + summary_content+="$result\n" + + # Check broken links in linkcheck + linkcheck_file_path='./docs/build-linkcheck/output.txt' + broken_count=$(grep -o '\[broken\]' "$linkcheck_file_path" | wc -l) + if [ "$broken_count" -eq 0 ]; then + result="Broken links: $broken_count :white_check_mark:" + else + result="Broken links: $broken_count :x:" + fi + echo "$result" + summary_content+="$result\n" + + # Spelling + result="Spelling: check report from artifacts" + echo "$result" + summary_content+="$result\n" + + echo "### Building with Sphinx results" >> $GITHUB_STEP_SUMMARY + echo -e "$summary_content" >> $GITHUB_STEP_SUMMARY + + - name: Check specific links + if: always() + run: | + cd ./docs + python3 ./scripts/specific_links_checker.py 2>&1 | tee specific_links_checker_report.txt + summary=$(grep -E '✅ All links are working\.|❌ [0-9]+ broken link\(s\)' specific_links_checker_report.txt | tail -n 1) + echo "### Extra links checker" >> $GITHUB_STEP_SUMMARY + echo 
"$summary" >> $GITHUB_STEP_SUMMARY + + - name: Upload linkcheck report + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 #4.6.2 + if: always() + with: + name: docs_linkcheck_report + path: ./docs/build-linkcheck/ + + - name: Upload specific links checker report + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 #4.6.2 + if: always() + with: + name: docs_specific_links_checker_report + path: ./docs/specific_links_checker_report.txt + + - name: Upload spelling report + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 #4.6.2 + if: always() + with: + name: docs_spelling_report + path: ./docs/build-spelling/ + + - name: Upload pages + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 #4.6.2 + if: always() + with: + name: docs_site + path: ./docs/build-html + + - name: Clean up + if: always() + run: rm -rf "$GITHUB_WORKSPACE"/* diff --git a/.github/workflows/dls-build-sources-make-build.yaml b/.github/workflows/dls-build-sources-make-build.yaml new file mode 100644 index 00000000..d7ac0096 --- /dev/null +++ b/.github/workflows/dls-build-sources-make-build.yaml @@ -0,0 +1,184 @@ +name: "[DLS] [U22/24] Build from sources using make build" +run-name: "[DLS] [U22/24] Build from sources using make build (by ${{ github.actor }})" +on: + workflow_call: + workflow_dispatch: + +permissions: {} +env: + dlstreamer-version: "2025.2.0" + DLS_REL_PATH: "./dlstreamer-repo" + +jobs: + build: + name: "Build from sources using make build on ${{ matrix.os }}" + runs-on: ${{ matrix.os }} + permissions: + contents: read + packages: read + strategy: + fail-fast: false + matrix: + include: + - os: ubuntu-22.04 + - os: ubuntu-24.04 + steps: + - name: Initial environment clean + run: | + sudo rm -rf dlstreamer-repo + + - name: Check out dlstreamer repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 + with: + persist-credentials: false + path: dlstreamer-repo + + - name: Init 
submodules + run: | + cd dlstreamer-repo + git submodule update --init thirdparty/spdlog + + + # ======================================================== BUILDING PART ======================================================== + - name: Install prerequisites + run: | + cd ${{ env.DLS_REL_PATH }}/scripts + ./DLS_install_prerequisites.sh + + - name: Install build dependencies + run: | + echo "Installing build dependencies for ${{ matrix.os }}" + if [[ ${{ matrix.os }} == "ubuntu-22.04" ]]; then + echo "Executing Ubuntu 22.04 specific setup..." + sudo apt-get update && \ + sudo apt-get install -y wget vainfo xz-utils python3-pip python3-gi gcc-multilib libglib2.0-dev \ + flex bison autoconf automake libtool libogg-dev make g++ libva-dev yasm libglx-dev libdrm-dev \ + python-gi-dev python3-dev unzip libgflags-dev \ + libgirepository1.0-dev libx265-dev libx264-dev libde265-dev gudev-1.0 libusb-1.0 nasm python3-venv \ + libcairo2-dev libxt-dev libgirepository1.0-dev libgles2-mesa-dev wayland-protocols libcurl4-openssl-dev \ + libssh2-1-dev cmake git valgrind numactl libvpx-dev libopus-dev libsrtp2-dev libxv-dev \ + linux-libc-dev libpmix2 libhwloc15 libhwloc-plugins libxcb1-dev libx11-xcb-dev \ + ffmpeg libpaho-mqtt-dev libpostproc-dev libavfilter-dev libavdevice-dev \ + libswscale-dev libswresample-dev libavutil-dev libavformat-dev libavcodec-dev libxml2-dev ocl-icd-opencl-dev \ + opencl-headers + elif [[ ${{ matrix.os }} == "ubuntu-24.04" ]]; then + echo "Executing Ubuntu 24.04 specific setup..." 
+ sudo apt-get update && \ + sudo apt-get install -y wget vainfo xz-utils python3-pip python3-gi gcc-multilib libglib2.0-dev \ + flex bison autoconf automake libtool libogg-dev make g++ libva-dev yasm libglx-dev libdrm-dev \ + python-gi-dev python3-dev unzip libgflags-dev libcurl4-openssl-dev \ + libgirepository1.0-dev libx265-dev libx264-dev libde265-dev gudev-1.0 libusb-1.0 nasm python3-venv \ + libcairo2-dev libxt-dev libgirepository1.0-dev libgles2-mesa-dev wayland-protocols \ + libssh2-1-dev cmake git valgrind numactl libvpx-dev libopus-dev libsrtp2-dev libxv-dev \ + linux-libc-dev libpmix2t64 libhwloc15 libhwloc-plugins libxcb1-dev libx11-xcb-dev \ + ffmpeg libpaho-mqtt-dev libopencv-dev libpostproc-dev libavfilter-dev libavdevice-dev \ + libswscale-dev libswresample-dev libavutil-dev libavformat-dev libavcodec-dev libtbb12 libxml2-dev \ + ocl-icd-opencl-dev + else + echo "Unknown Ubuntu version: ${{ matrix.os }}" + exit 1 + fi + + - name: Set up a Python environment + run: | + python3 -m venv ~/python3venv + source ~/python3venv/bin/activate + pip install --upgrade pip==24.0 + pip install meson==1.4.1 ninja==1.11.1.1 + + - name: Install OpenVINO + run: | + cd ${{ env.DLS_REL_PATH }} + echo "Installing OpenVINO..." + sudo ./scripts/install_dependencies/install_openvino.sh + echo " " + echo "Installing OpenVINO dependencies..." + sudo -E /opt/intel/openvino_2025/install_dependencies/install_openvino_dependencies.sh + + - name: Install OpenVINO GenAI + run: | + cd ${{ env.DLS_REL_PATH }} + echo "Installing OpenVINO GenAI for ${{ matrix.os }}..." + if [[ ${{ matrix.os }} == "ubuntu-22.04" ]]; then + echo "Executing Ubuntu 22.04 specific setup..." 
+ wget -O- https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2025.4/linux/openvino_genai_ubuntu22_2025.4.0.0_x86_64.tar.gz | tar -xz + sudo mv openvino_genai_ubuntu22_2025.4.0.0_x86_64 /opt/intel/openvino_genai + source /opt/intel/openvino_genai/setupvars.sh + elif [[ ${{ matrix.os }} == "ubuntu-24.04" ]]; then + echo "Executing Ubuntu 24.04 specific setup..." + wget -O- https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2025.4/linux/openvino_genai_ubuntu24_2025.4.0.0_x86_64.tar.gz | tar -xz + sudo mv openvino_genai_ubuntu24_2025.4.0.0_x86_64 /opt/intel/openvino_genai + source /opt/intel/openvino_genai/setupvars.sh + else + echo "Unknown Ubuntu version: ${{ matrix.os }}" + exit 1 + fi + + - name: Build DL Streamer using make build + run: | + echo "Activating Python virtual environment..." + source ~/python3venv/bin/activate + cd ${{ env.DLS_REL_PATH }} + echo " " + echo "Sourcing OpenVINO environment..." + source /opt/intel/openvino_2025/setupvars.sh + echo " " + echo "Sourcing OpenVINO GenAI environment..." + source /opt/intel/openvino_genai/setupvars.sh + echo " " + echo "Building DL Streamer using make build..." + make build + + - name: Install DL Streamer + run: | + cd ${{ env.DLS_REL_PATH }} + sudo -E make install + + - name: Install Python dependencies + run: | + sudo apt-get install -y -q --no-install-recommends gcc cmake python3-full python-gi-dev python3-dev python3-pip \ + libglib2.0-dev libcairo2-dev libopencv-objdetect-dev libopencv-photo-dev libopencv-stitching-dev libopencv-video-dev \ + libopencv-calib3d-dev libopencv-core-dev libopencv-dnn-dev libgirepository1.0-dev + + source ~/python3venv/bin/activate + cd ${{ env.DLS_REL_PATH }} + python3 -m pip install -r requirements.txt + + - name: Verify DL Streamer installation + run: | + echo "Setting up DL Streamer environment..." 
+ export LIBVA_DRIVER_NAME=iHD + export GST_PLUGIN_PATH="/opt/intel/dlstreamer/Release/lib:/opt/intel/dlstreamer/gstreamer/lib/gstreamer-1.0:$GST_PLUGIN_PATH" + export LD_LIBRARY_PATH="/opt/intel/dlstreamer/Release/lib:/opt/intel/dlstreamer/gstreamer/lib:/opt/intel/dlstreamer/opencv/lib:/opt/intel/dlstreamer/rdkafka/lib:$LD_LIBRARY_PATH" + export LIBVA_DRIVERS_PATH="/usr/lib/x86_64-linux-gnu/dri" + export GST_VA_ALL_DRIVERS="1" + export PATH="/opt/intel/dlstreamer/Release/bin:/opt/intel/dlstreamer/gstreamer/bin:/opt/intel/dlstreamer/opencv/bin:$HOME/.local/bin:$HOME/python3venv/bin:$PATH" + export PKG_CONFIG_PATH="/opt/intel/dlstreamer/Release/lib/pkgconfig:/opt/intel/dlstreamer/gstreamer/lib/pkgconfig:$PKG_CONFIG_PATH" + export GST_PLUGIN_FEATURE_RANK=${GST_PLUGIN_FEATURE_RANK},ximagesink:MAX + export GI_TYPELIB_PATH="/opt/intel/dlstreamer/gstreamer/lib/girepository-1.0:/usr/lib/x86_64-linux-gnu/girepository-1.0" + export PYTHONPATH="/opt/intel/dlstreamer/gstreamer/lib/python3/dist-packages:/opt/intel/dlstreamer/python:$PYTHONPATH" + + echo "Activating Python virtual environment..." + source ~/python3venv/bin/activate + echo " " + echo "Sourcing OpenVINO environment..." + source /opt/intel/openvino_2025/setupvars.sh + echo " " + echo "Sourcing OpenVINO GenAI environment..." + source /opt/intel/openvino_genai/setupvars.sh + echo " " + + echo "Verifying DL Streamer installation..." 
+ gst-inspect-1.0 | grep gva + gst-inspect-1.0 gvadetect + gst-inspect-1.0 gvawatermark3d + gst-inspect-1.0 gvapython + gst-inspect-1.0 gvamotiondetect + gst-inspect-1.0 gvafpscounter + + # ======================================================== CLEANUP PART ======================================================== + - name: Clean up + if: always() + run: | + rm -rf ~/python3venv + sudo rm -rf ${{ env.DLS_REL_PATH }} diff --git a/.github/workflows/dls-coverity.yaml b/.github/workflows/dls-coverity.yaml index 2b12d55b..d23e0918 100644 --- a/.github/workflows/dls-coverity.yaml +++ b/.github/workflows/dls-coverity.yaml @@ -1,143 +1,143 @@ -name: "[DLS] Coverity (C/C++)" -run-name: "[DLS] Coverity (C/C++)" -on: - workflow_call: - secrets: - DLS_COVERITY_TOKEN: - required: true - DLS_COVERITY_EMAIL: - required: true - DLS_COVERITY_PROJECT: - required: true - workflow_dispatch: -permissions: {} - -jobs: - detect-languages: - runs-on: ubuntu-latest - permissions: - contents: read - outputs: - run-analysis: ${{ steps.detect-langs.outputs.run-analysis }} - steps: - - name: Check out dlstreamer repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 - with: - persist-credentials: false - path: dlstreamer-repo - fetch-depth: 0 - - - name: Detect changed languages and projects - id: detect-langs - run: | - cd dlstreamer-repo - if [ "$(git rev-parse --abbrev-ref HEAD)" != "main" ]; then - git fetch origin main:main - echo "Fetched main branch" - fi - changed_files=$(git diff --name-only main...$GITHUB_SHA -- '*.h' '*.hpp' '*.c' '*.cpp' || true) - echo "Performed git diff" - - if [ -z "$changed_files" ]; then - echo "No relevant changed files detected." 
- echo "run-analysis=false" >> $GITHUB_OUTPUT - exit 0 - else - run_analysis=true - fi - - - echo "Changed files:" - echo "$changed_files" - echo "Run analysis:" - echo "$run_analysis" - echo "run-analysis=$run_analysis" >> $GITHUB_OUTPUT - - coverity-scan: - needs: detect-languages - runs-on: ubuntu-latest - permissions: - contents: read - if: needs.detect-languages.outputs.run-analysis == 'true' - steps: - - name: Check out dlstreamer repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 - with: - persist-credentials: false - path: dlstreamer-repo - - - name: Init submodules - run: | - cd dlstreamer-repo - git submodule update --init thirdparty/spdlog - - - name: Build code manually for dlstreamer - run: | - echo "Installing dependencies" - sudo apt-get update - sudo apt-get install -y wget vainfo xz-utils python3-pip python3-gi gcc-multilib libglib2.0-dev \ - flex bison autoconf automake libtool libogg-dev make g++ libva-dev yasm libglx-dev libdrm-dev \ - python-gi-dev python3-dev unzip libgflags-dev libcurl4-openssl-dev \ - libgirepository1.0-dev libx265-dev libx264-dev libde265-dev gudev-1.0 libusb-1.0 nasm python3-venv \ - libcairo2-dev libxt-dev libgirepository1.0-dev libgles2-mesa-dev wayland-protocols \ - libssh2-1-dev cmake git valgrind numactl libvpx-dev libopus-dev libsrtp2-dev libxv-dev \ - linux-libc-dev libpmix2t64 libhwloc15 libhwloc-plugins libxcb1-dev libx11-xcb-dev \ - ffmpeg librdkafka-dev libpaho-mqtt-dev libopencv-dev libpostproc-dev libavfilter-dev libavdevice-dev \ - libswscale-dev libswresample-dev libavutil-dev libavformat-dev libavcodec-dev libtbb12 ocl-icd-opencl-dev - - echo "Setting up a Python environment" - python3 -m venv python3venv - source python3venv/bin/activate - pip install --upgrade pip==24.0 - pip install meson==1.4.1 ninja==1.11.1.1 - - echo "Build gstreamer" - git clone https://gitlab.freedesktop.org/gstreamer/gstreamer.git - cd gstreamer - git checkout tags/1.26.1 -b 1.26.1 - export 
PKG_CONFIG_PATH=/usr/local/lib/pkgconfig - meson setup -Dexamples=disabled -Dtests=disabled -Dvaapi=enabled -Dgst-examples=disabled --buildtype=release --prefix=/opt/intel/dlstreamer/gstreamer --libdir=lib/ --libexecdir=bin/ build/ - ninja -C build - sudo env PATH="$PWD/../python3venv/bin:$PATH" meson install -C build/ - cd .. - - echo "Install OpenVINO" - echo "deb https://apt.repos.intel.com/openvino/2025 ubuntu24 main" | sudo tee /etc/apt/sources.list.d/intel-openvino-2025.list - curl -sSL -O https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB - sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB - sudo apt-get update && sudo apt-get install --no-install-recommends -y openvino-2025.2.0 - sudo apt-get clean - - - name: Download Coverity Scan Tool - run: | - cd dlstreamer-repo - wget --quiet https://scan.coverity.com/download/linux64 \ - --post-data "token=${{ secrets.DLS_COVERITY_TOKEN }}&project=${{ secrets.DLS_COVERITY_PROJECT }}" \ - -O coverity_tool.tgz - mkdir cov-analysis - tar xzf coverity_tool.tgz --strip-components=1 -C cov-analysis - - - name: Build with Coverity Analysis - run: | - cd dlstreamer-repo - mkdir build && cd build - export PKG_CONFIG_PATH="/opt/intel/dlstreamer/gstreamer/lib/pkgconfig:${PKG_CONFIG_PATH}" - ../cov-analysis/bin/cov-build --dir cov-int cmake -DENABLE_PAHO_INSTALLATION=ON -DENABLE_RDKAFKA_INSTALLATION=ON -DENABLE_VAAPI=ON -DENABLE_SAMPLES=ON .. 
- ../cov-analysis/bin/cov-build --dir cov-int make -j$(nproc) - - - name: Create tarball for upload - run: | - cd dlstreamer-repo - tar czf coverity-output.tgz -C build cov-int - - - name: Upload to Coverity Scan - env: - GIT_BRANCH: ${{ github.ref_name }} - run: | - cd dlstreamer-repo - curl --form token=${{ secrets.DLS_COVERITY_TOKEN }} \ - --form email=${{ secrets.DLS_COVERITY_EMAIL }} \ - --form file=@coverity-output.tgz \ - --form version="$GIT_BRANCH-`date +%Y%m%d%H%M%S`" \ - --form description="GA scan $GIT_BRANCH-`date +%Y%m%d%H%M%S`" \ - https://scan.coverity.com/builds?project=${{ secrets.DLS_COVERITY_PROJECT }} +name: "[DLS] Coverity (C/C++)" +run-name: "[DLS] Coverity (C/C++)" +on: + workflow_call: + secrets: + DLS_COVERITY_TOKEN: + required: true + DLS_COVERITY_EMAIL: + required: true + DLS_COVERITY_PROJECT: + required: true + workflow_dispatch: +permissions: {} + +jobs: + detect-languages: + runs-on: ubuntu-latest + permissions: + contents: read + outputs: + run-analysis: ${{ steps.detect-langs.outputs.run-analysis }} + steps: + - name: Check out dlstreamer repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 + with: + persist-credentials: false + path: dlstreamer-repo + fetch-depth: 0 + + - name: Detect changed languages and projects + id: detect-langs + run: | + cd dlstreamer-repo + if [ "$(git rev-parse --abbrev-ref HEAD)" != "main" ]; then + git fetch origin main:main + echo "Fetched main branch" + fi + changed_files=$(git diff --name-only main...$GITHUB_SHA -- '*.h' '*.hpp' '*.c' '*.cpp' || true) + echo "Performed git diff" + + if [ -z "$changed_files" ]; then + echo "No relevant changed files detected." 
+ echo "run-analysis=false" >> $GITHUB_OUTPUT + exit 0 + else + run_analysis=true + fi + + + echo "Changed files:" + echo "$changed_files" + echo "Run analysis:" + echo "$run_analysis" + echo "run-analysis=$run_analysis" >> $GITHUB_OUTPUT + + coverity-scan: + needs: detect-languages + runs-on: ubuntu-latest + permissions: + contents: read + if: needs.detect-languages.outputs.run-analysis == 'true' + steps: + - name: Check out dlstreamer repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 + with: + persist-credentials: false + path: dlstreamer-repo + + - name: Init submodules + run: | + cd dlstreamer-repo + git submodule update --init thirdparty/spdlog + + - name: Build code manually for dlstreamer + run: | + echo "Installing dependencies" + sudo apt-get update + sudo apt-get install -y wget vainfo xz-utils python3-pip python3-gi gcc-multilib libglib2.0-dev \ + flex bison autoconf automake libtool libogg-dev make g++ libva-dev yasm libglx-dev libdrm-dev \ + python-gi-dev python3-dev unzip libgflags-dev libcurl4-openssl-dev \ + libgirepository1.0-dev libx265-dev libx264-dev libde265-dev gudev-1.0 libusb-1.0 nasm python3-venv \ + libcairo2-dev libxt-dev libgirepository1.0-dev libgles2-mesa-dev wayland-protocols \ + libssh2-1-dev cmake git valgrind numactl libvpx-dev libopus-dev libsrtp2-dev libxv-dev \ + linux-libc-dev libpmix2t64 libhwloc15 libhwloc-plugins libxcb1-dev libx11-xcb-dev \ + ffmpeg librdkafka-dev libpaho-mqtt-dev libopencv-dev libpostproc-dev libavfilter-dev libavdevice-dev \ + libswscale-dev libswresample-dev libavutil-dev libavformat-dev libavcodec-dev libtbb12 ocl-icd-opencl-dev + + echo "Setting up a Python environment" + python3 -m venv python3venv + source python3venv/bin/activate + pip install --upgrade pip==24.0 + pip install meson==1.4.1 ninja==1.11.1.1 + + echo "Build gstreamer" + git clone https://gitlab.freedesktop.org/gstreamer/gstreamer.git + cd gstreamer + git checkout tags/1.26.1 -b 1.26.1 + export 
PKG_CONFIG_PATH=/usr/local/lib/pkgconfig + meson setup -Dexamples=disabled -Dtests=disabled -Dvaapi=enabled -Dgst-examples=disabled --buildtype=release --prefix=/opt/intel/dlstreamer/gstreamer --libdir=lib/ --libexecdir=bin/ build/ + ninja -C build + sudo env PATH="$PWD/../python3venv/bin:$PATH" meson install -C build/ + cd .. + + echo "Install OpenVINO" + echo "deb https://apt.repos.intel.com/openvino/2025 ubuntu24 main" | sudo tee /etc/apt/sources.list.d/intel-openvino-2025.list + curl -sSL -O https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB + sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB + sudo apt-get update && sudo apt-get install --no-install-recommends -y openvino-2025.2.0 + sudo apt-get clean + + - name: Download Coverity Scan Tool + run: | + cd dlstreamer-repo + wget --quiet https://scan.coverity.com/download/linux64 \ + --post-data "token=${{ secrets.DLS_COVERITY_TOKEN }}&project=${{ secrets.DLS_COVERITY_PROJECT }}" \ + -O coverity_tool.tgz + mkdir cov-analysis + tar xzf coverity_tool.tgz --strip-components=1 -C cov-analysis + + - name: Build with Coverity Analysis + run: | + cd dlstreamer-repo + mkdir build && cd build + export PKG_CONFIG_PATH="/opt/intel/dlstreamer/gstreamer/lib/pkgconfig:${PKG_CONFIG_PATH}" + ../cov-analysis/bin/cov-build --dir cov-int cmake -DENABLE_PAHO_INSTALLATION=ON -DENABLE_RDKAFKA_INSTALLATION=ON -DENABLE_VAAPI=ON -DENABLE_SAMPLES=ON .. 
+ ../cov-analysis/bin/cov-build --dir cov-int make -j$(nproc) + + - name: Create tarball for upload + run: | + cd dlstreamer-repo + tar czf coverity-output.tgz -C build cov-int + + - name: Upload to Coverity Scan + env: + GIT_BRANCH: ${{ github.ref_name }} + run: | + cd dlstreamer-repo + curl --form token=${{ secrets.DLS_COVERITY_TOKEN }} \ + --form email=${{ secrets.DLS_COVERITY_EMAIL }} \ + --form file=@coverity-output.tgz \ + --form version="$GIT_BRANCH-`date +%Y%m%d%H%M%S`" \ + --form description="GA scan $GIT_BRANCH-`date +%Y%m%d%H%M%S`" \ + https://scan.coverity.com/builds?project=${{ secrets.DLS_COVERITY_PROJECT }} diff --git a/.github/workflows/dls-download-models.yaml b/.github/workflows/dls-download-models.yaml index 3a2b5c50..9a328cc6 100644 --- a/.github/workflows/dls-download-models.yaml +++ b/.github/workflows/dls-download-models.yaml @@ -1,242 +1,242 @@ -name: "[DLS] Models update on self-hosted runners" -run-name: "[DLS] Models update on self-hosted runners (by @${{ github.actor }} via ${{ github.event_name }})" -on: - schedule: - - cron: '0 5 * * MON' # 5:00 UTC each Monday - workflow_dispatch: - inputs: - models_to_download: - description: 'Which models to download?' - required: true - default: 'all' - type: choice - options: - - all - - public - - omz - - specific_model - specific_model_name: - description: 'What SPECIFIC model do you want to download?' - required: false - type: string - specific_model_path: - description: 'Where do you want to download your SPECIFIC model? 
(public/omz)' - required: false - type: string - runner_labels: - description: "List of runner(s) labels (example: DLS-TGL-02,DLS-ARL-01)" - required: false - type: string - -permissions: {} -env: - MODELS_PATH: "$HOME/models" - MODELS_DIR_MIN_SIZE_MB: 10 - DEFAULT_LABELS: "DLS-ARL-01,DLS-ARL-02,DLS-ARL-03,DLS-TGL-01,DLS-TGL-02,DLS-TGL-03,DLS-TGL-04,DLS-TGL-05" - -jobs: - setup-runners: - name: Set runners to execute update - runs-on: [self-hosted, ubuntu] - outputs: - runners: ${{ steps.set-labels.outputs.runners }} - steps: - - name: Set runners labels - id: set-labels - env: - user_labels: ${{ inputs.runner_labels }} - run: | - LABELS=$user_labels - if [ -z "$LABELS" ]; then - LABELS="${{ env.DEFAULT_LABELS }}" # Use default configuration if user didn't set any input labels - fi - LABELS_ARRAY=$(echo "$LABELS" | jq -Rc 'split(",")') - echo "Runners lables array: $LABELS_ARRAY" - echo "runners=$LABELS_ARRAY" >> $GITHUB_OUTPUT - - update_linux_hosts: - name: Update on Linux runners - needs: setup-runners - permissions: - contents: read - strategy: - fail-fast: false - matrix: - runner: ${{ fromJson(needs.setup-runners.outputs.runners) }} - runs-on: - - ${{ matrix.runner }} - steps: - - name: Get script - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 - with: - persist-credentials: false - path: dlstreamer-repo - - - name: Download only specific model - env: - model_path: ${{ inputs.specific_model_path }} - model_name: ${{ inputs.specific_model_name }} - if: ${{ inputs.models_to_download == 'specific_model' }} - run: | - MODEL_PATH=$model_path - MODEL_NAME=$model_name - export MODELS_PATH=${{ env.MODELS_PATH }} - echo "MODEL_PATH=$MODEL_PATH" - echo "MODEL_NAME=$MODEL_NAME" - echo "Downloading specific model ${{ env.MODELS_PATH }}/$MODEL_PATH/$MODEL_NAME" >> $GITHUB_STEP_SUMMARY - if [ -d "${{ env.MODELS_PATH }}/$MODEL_PATH/$MODEL_NAME" ]; then - echo "Removing $MODEL_NAME directory" - rm -rf "${{ env.MODELS_PATH }}/$MODEL_PATH/$MODEL_NAME" 
- - if [[ "$MODEL_PATH" == "public" ]]; then - ./dlstreamer-repo/samples/download_public_models.sh "$MODEL_NAME" coco128 - elif [[ "$MODEL_PATH" == "omz" ]]; then - mkdir -p .virtualenvs/dlstreamer - python3 -m venv .virtualenvs/dlstreamer - source .virtualenvs/dlstreamer/bin/activate - pip3 install --no-cache-dir --upgrade tensorflow==2.19.1 openvino-dev[onnx]==2024.6.0 torch==2.8.0 - export MODELS_PATH=${{ env.MODELS_PATH }} - ./dlstreamer-repo/samples/download_omz_models.sh $MODEL_NAME - deactivate - fi - fi - - - name: Prepare directories - public models - if: ${{ inputs.models_to_download == 'public' || inputs.models_to_download == 'all' || github.event_name == 'schedule' }} - run: | - # Prepare directories with backup of existing ones - # 1) mv public_old public_old_old - # 2) mv public public_old - - if [ -d "${{ env.MODELS_PATH }}/public_old" ]; then - echo "Changing directories names: public_old -> public_old_old" - [ -d "${{ env.MODELS_PATH }}/public_old_old" ] && rm -r "${{ env.MODELS_PATH }}/public_old_old" - mv "${{ env.MODELS_PATH }}/public_old" "${{ env.MODELS_PATH }}/public_old_old" - fi - if [ -d "${{ env.MODELS_PATH }}/public" ]; then - echo "Changing directories names: public -> public_old" - [ -d "${{ env.MODELS_PATH }}/public_old" ] && rm -r "${{ env.MODELS_PATH }}/public_old" - mv "${{ env.MODELS_PATH }}/public" "${{ env.MODELS_PATH }}/public_old" - fi - - - name: Prepare directories - OMZ models - if: ${{ inputs.models_to_download == 'omz' || inputs.models_to_download == 'all' || github.event_name == 'schedule' }} - run: | - # Prepare directories with backup of existing ones - # 1) mv intel intel_old - # 2) mv intel_old intel_old_old - - if [ -d "${{ env.MODELS_PATH }}/intel_old" ]; then - echo "Changing directories names: intel_old -> intel_old_old" - [ -d "${{ env.MODELS_PATH }}/intel_old_old" ] && rm -r "${{ env.MODELS_PATH }}/intel_old_old" - mv "${{ env.MODELS_PATH }}/intel_old" "${{ env.MODELS_PATH }}/intel_old_old" - fi - if [ -d "${{ 
env.MODELS_PATH }}/intel" ]; then - echo "Changing directories names: intel -> intel_old" - [ -d "${{ env.MODELS_PATH }}/intel_old" ] && rm -r "${{ env.MODELS_PATH }}/intel_old" - mv "${{ env.MODELS_PATH }}/intel" "${{ env.MODELS_PATH }}/intel_old" - fi - - - name: Download public models - id: download-public - if: ${{ inputs.models_to_download == 'public' || inputs.models_to_download == 'all' || github.event_name == 'schedule' }} - run: | - export MODELS_PATH=${{ env.MODELS_PATH }} - echo "Downloading public models" - ./dlstreamer-repo/samples/download_public_models.sh all coco128 - - - name: Download OMZ models (setup venv, dependencies and download) - id: download-omz - if: ${{ inputs.models_to_download == 'omz' || inputs.models_to_download == 'all' || github.event_name == 'schedule' }} - run: | - mkdir -p .virtualenvs/dlstreamer - python3 -m venv .virtualenvs/dlstreamer - source .virtualenvs/dlstreamer/bin/activate - pip3 install --no-cache-dir --upgrade tensorflow==2.19.1 openvino-dev[onnx]==2024.6.0 torch==2.8.0 - export MODELS_PATH=${{ env.MODELS_PATH }} - ./dlstreamer-repo/samples/download_omz_models.sh - deactivate - - - name: Verify public models downloading - id: verify-public - if: > - steps.download-public.outcome == 'success' && - (inputs.models_to_download == 'public' || inputs.models_to_download == 'all' || github.event_name == 'schedule') - run: | - if [ -z "$( ls -A ${{ env.MODELS_PATH }}/public )" ]; then - echo "Public models not downloaded correctly - directory not created ❌" - echo "Public models not downloaded - directory not created ❌" >> $GITHUB_STEP_SUMMARY - exit 1 - else - echo "Public models directory created" - echo "Public models directory created" >> $GITHUB_STEP_SUMMARY - DIR_SIZE_MB=$(du -sm "${{ env.MODELS_PATH }}/public" | cut -f1) - echo "Public models directory size: ${DIR_SIZE_MB} MB" - echo "Public models directory size: ${DIR_SIZE_MB} MB" >> $GITHUB_STEP_SUMMARY - if [ "$DIR_SIZE_MB" -ge "${{ env.MODELS_DIR_MIN_SIZE_MB }}" 
]; then - echo "Public models downloaded ✅" - echo "Public models downloaded ✅" >> $GITHUB_STEP_SUMMARY - else - Public models not downloaded correctly - directory too small ❌" - Public models not downloaded correctly - directory too small ❌" >> $GITHUB_STEP_SUMMARY - fi - fi - - - name: Verify OMZ models downloading - id: verify-omz - if: > - steps.download-omz.outcome == 'success' && - (inputs.models_to_download == 'omz' || inputs.models_to_download == 'all' || github.event_name == 'schedule') - run: | - if [ -z "$( ls -A ${{ env.MODELS_PATH }}/intel )" ]; then - echo "Open Model Zoo models not downloaded correctly - directory not created ❌" - echo "Open Model Zoo models not downloaded - directory not created ❌" >> $GITHUB_STEP_SUMMARY - exit 1 - else - echo "Open Model Zoo models directory created" - echo "Open Model Zoo models directory created" >> $GITHUB_STEP_SUMMARY - DIR_SIZE_MB=$(du -sm "${{ env.MODELS_PATH }}/intel" | cut -f1) - echo "Open Model Zoo models directory size: ${DIR_SIZE_MB} MB" - echo "Open Model Zoo models directory size: ${DIR_SIZE_MB} MB" >> $GITHUB_STEP_SUMMARY - if [ "$DIR_SIZE_MB" -ge "${{ env.MODELS_DIR_MIN_SIZE_MB }}" ]; then - echo "Open Model Zoo models downloaded ✅" - echo "Open Model Zoo models downloaded ✅" >> $GITHUB_STEP_SUMMARY - else - Open Model Zoo models not downloaded correctly - directory too small ❌" - Open Model Zoo models not downloaded correctly - directory too small ❌" >> $GITHUB_STEP_SUMMARY - fi - fi - - - name: Manage public models directories after success - if: ${{ steps.verify-public.outcome == 'success' && steps.download-public.outcome == 'success' }} - run: | - [ -d "${{ env.MODELS_PATH }}/public_old_old" ] && rm -rf "${{ env.MODELS_PATH }}/public_old_old" - echo "New public models set up ✅" >> $GITHUB_STEP_SUMMARY - - - name: Manage OMZ models directories after success - if: ${{ steps.verify-omz.outcome == 'success' && steps.download-omz.outcome == 'success' }} - run: | - [ -d "${{ env.MODELS_PATH 
}}/intel_old_old" ] && rm -rf "${{ env.MODELS_PATH }}/intel_old_old" - echo "New Open Model Zoo models set up ✅" >> $GITHUB_STEP_SUMMARY - - - name: Restore public models directories after fail - if: ${{ always() && steps.download-public.outcome != 'skipped' && !(steps.verify-public.outcome == 'success' && steps.download-public.outcome == 'success') }} - run: | - [ -d "${{ env.MODELS_PATH }}/public" ] && rm -rf "${{ env.MODELS_PATH }}/public" - [ -d "${{ env.MODELS_PATH }}/public_old" ] && mv "${{ env.MODELS_PATH }}/public_old" "${{ env.MODELS_PATH }}/public" - [ -d "${{ env.MODELS_PATH }}/public_old_old" ] && mv "${{ env.MODELS_PATH }}/public_old_old" "${{ env.MODELS_PATH }}/public_old" - echo "Old public models restored" >> $GITHUB_STEP_SUMMARY - - - name: Restore OMZ models directories after fail - if: ${{ always() && steps.download-omz.outcome != 'skipped' && !(steps.verify-omz.outcome == 'success' && steps.download-omz.outcome == 'success') }} - run: | - [ -d "${{ env.MODELS_PATH }}/intel" ] && rm -rf "${{ env.MODELS_PATH }}/intel" - [ -d "${{ env.MODELS_PATH }}/intel_old" ] && mv "${{ env.MODELS_PATH }}/intel_old" "${{ env.MODELS_PATH }}/intel" - [ -d "${{ env.MODELS_PATH }}/intel_old_old" ] && mv "${{ env.MODELS_PATH }}/intel_old_old" "${{ env.MODELS_PATH }}/intel_old" - echo "Old Open Model Zoo models restored" >> $GITHUB_STEP_SUMMARY - - - name: Clean up - if: always() - run: | - rm -rf dlstreamer-repo .virtualenvs - rm -rf $HOME/.virtualenvs +name: "[DLS] Models update on self-hosted runners" +run-name: "[DLS] Models update on self-hosted runners (by @${{ github.actor }} via ${{ github.event_name }})" +on: + schedule: + - cron: '0 5 * * MON' # 5:00 UTC each Monday + workflow_dispatch: + inputs: + models_to_download: + description: 'Which models to download?' + required: true + default: 'all' + type: choice + options: + - all + - public + - omz + - specific_model + specific_model_name: + description: 'What SPECIFIC model do you want to download?' 
+ required: false + type: string + specific_model_path: + description: 'Where do you want to download your SPECIFIC model? (public/omz)' + required: false + type: string + runner_labels: + description: "List of runner(s) labels (example: DLS-TGL-02,DLS-ARL-01)" + required: false + type: string + +permissions: {} +env: + MODELS_PATH: "$HOME/models" + MODELS_DIR_MIN_SIZE_MB: 10 + DEFAULT_LABELS: "DLS-ARL-01,DLS-ARL-02,DLS-ARL-03,DLS-TGL-01,DLS-TGL-02,DLS-TGL-03,DLS-TGL-04,DLS-TGL-05" + +jobs: + setup-runners: + name: Set runners to execute update + runs-on: [self-hosted, ubuntu] + outputs: + runners: ${{ steps.set-labels.outputs.runners }} + steps: + - name: Set runners labels + id: set-labels + env: + user_labels: ${{ inputs.runner_labels }} + run: | + LABELS=$user_labels + if [ -z "$LABELS" ]; then + LABELS="${{ env.DEFAULT_LABELS }}" # Use default configuration if user didn't set any input labels + fi + LABELS_ARRAY=$(echo "$LABELS" | jq -Rc 'split(",")') + echo "Runners lables array: $LABELS_ARRAY" + echo "runners=$LABELS_ARRAY" >> $GITHUB_OUTPUT + + update_linux_hosts: + name: Update on Linux runners + needs: setup-runners + permissions: + contents: read + strategy: + fail-fast: false + matrix: + runner: ${{ fromJson(needs.setup-runners.outputs.runners) }} + runs-on: + - ${{ matrix.runner }} + steps: + - name: Get script + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 + with: + persist-credentials: false + path: dlstreamer-repo + + - name: Download only specific model + env: + model_path: ${{ inputs.specific_model_path }} + model_name: ${{ inputs.specific_model_name }} + if: ${{ inputs.models_to_download == 'specific_model' }} + run: | + MODEL_PATH=$model_path + MODEL_NAME=$model_name + export MODELS_PATH=${{ env.MODELS_PATH }} + echo "MODEL_PATH=$MODEL_PATH" + echo "MODEL_NAME=$MODEL_NAME" + echo "Downloading specific model ${{ env.MODELS_PATH }}/$MODEL_PATH/$MODEL_NAME" >> $GITHUB_STEP_SUMMARY + if [ -d "${{ env.MODELS_PATH 
}}/$MODEL_PATH/$MODEL_NAME" ]; then + echo "Removing $MODEL_NAME directory" + rm -rf "${{ env.MODELS_PATH }}/$MODEL_PATH/$MODEL_NAME" + + if [[ "$MODEL_PATH" == "public" ]]; then + ./dlstreamer-repo/samples/download_public_models.sh "$MODEL_NAME" coco128 + elif [[ "$MODEL_PATH" == "omz" ]]; then + mkdir -p .virtualenvs/dlstreamer + python3 -m venv .virtualenvs/dlstreamer + source .virtualenvs/dlstreamer/bin/activate + pip3 install --no-cache-dir --upgrade tensorflow==2.19.1 openvino-dev[onnx]==2024.6.0 torch==2.8.0 + export MODELS_PATH=${{ env.MODELS_PATH }} + ./dlstreamer-repo/samples/download_omz_models.sh $MODEL_NAME + deactivate + fi + fi + + - name: Prepare directories - public models + if: ${{ inputs.models_to_download == 'public' || inputs.models_to_download == 'all' || github.event_name == 'schedule' }} + run: | + # Prepare directories with backup of existing ones + # 1) mv public_old public_old_old + # 2) mv public public_old + + if [ -d "${{ env.MODELS_PATH }}/public_old" ]; then + echo "Changing directories names: public_old -> public_old_old" + [ -d "${{ env.MODELS_PATH }}/public_old_old" ] && rm -r "${{ env.MODELS_PATH }}/public_old_old" + mv "${{ env.MODELS_PATH }}/public_old" "${{ env.MODELS_PATH }}/public_old_old" + fi + if [ -d "${{ env.MODELS_PATH }}/public" ]; then + echo "Changing directories names: public -> public_old" + [ -d "${{ env.MODELS_PATH }}/public_old" ] && rm -r "${{ env.MODELS_PATH }}/public_old" + mv "${{ env.MODELS_PATH }}/public" "${{ env.MODELS_PATH }}/public_old" + fi + + - name: Prepare directories - OMZ models + if: ${{ inputs.models_to_download == 'omz' || inputs.models_to_download == 'all' || github.event_name == 'schedule' }} + run: | + # Prepare directories with backup of existing ones + # 1) mv intel intel_old + # 2) mv intel_old intel_old_old + + if [ -d "${{ env.MODELS_PATH }}/intel_old" ]; then + echo "Changing directories names: intel_old -> intel_old_old" + [ -d "${{ env.MODELS_PATH }}/intel_old_old" ] && rm -r "${{ 
env.MODELS_PATH }}/intel_old_old" + mv "${{ env.MODELS_PATH }}/intel_old" "${{ env.MODELS_PATH }}/intel_old_old" + fi + if [ -d "${{ env.MODELS_PATH }}/intel" ]; then + echo "Changing directories names: intel -> intel_old" + [ -d "${{ env.MODELS_PATH }}/intel_old" ] && rm -r "${{ env.MODELS_PATH }}/intel_old" + mv "${{ env.MODELS_PATH }}/intel" "${{ env.MODELS_PATH }}/intel_old" + fi + + - name: Download public models + id: download-public + if: ${{ inputs.models_to_download == 'public' || inputs.models_to_download == 'all' || github.event_name == 'schedule' }} + run: | + export MODELS_PATH=${{ env.MODELS_PATH }} + echo "Downloading public models" + ./dlstreamer-repo/samples/download_public_models.sh all coco128 + + - name: Download OMZ models (setup venv, dependencies and download) + id: download-omz + if: ${{ inputs.models_to_download == 'omz' || inputs.models_to_download == 'all' || github.event_name == 'schedule' }} + run: | + mkdir -p .virtualenvs/dlstreamer + python3 -m venv .virtualenvs/dlstreamer + source .virtualenvs/dlstreamer/bin/activate + pip3 install --no-cache-dir --upgrade tensorflow==2.19.1 openvino-dev[onnx]==2024.6.0 torch==2.8.0 + export MODELS_PATH=${{ env.MODELS_PATH }} + ./dlstreamer-repo/samples/download_omz_models.sh + deactivate + + - name: Verify public models downloading + id: verify-public + if: > + steps.download-public.outcome == 'success' && + (inputs.models_to_download == 'public' || inputs.models_to_download == 'all' || github.event_name == 'schedule') + run: | + if [ -z "$( ls -A ${{ env.MODELS_PATH }}/public )" ]; then + echo "Public models not downloaded correctly - directory not created ❌" + echo "Public models not downloaded - directory not created ❌" >> $GITHUB_STEP_SUMMARY + exit 1 + else + echo "Public models directory created" + echo "Public models directory created" >> $GITHUB_STEP_SUMMARY + DIR_SIZE_MB=$(du -sm "${{ env.MODELS_PATH }}/public" | cut -f1) + echo "Public models directory size: ${DIR_SIZE_MB} MB" + echo 
"Public models directory size: ${DIR_SIZE_MB} MB" >> $GITHUB_STEP_SUMMARY + if [ "$DIR_SIZE_MB" -ge "${{ env.MODELS_DIR_MIN_SIZE_MB }}" ]; then + echo "Public models downloaded ✅" + echo "Public models downloaded ✅" >> $GITHUB_STEP_SUMMARY + else + Public models not downloaded correctly - directory too small ❌" + Public models not downloaded correctly - directory too small ❌" >> $GITHUB_STEP_SUMMARY + fi + fi + + - name: Verify OMZ models downloading + id: verify-omz + if: > + steps.download-omz.outcome == 'success' && + (inputs.models_to_download == 'omz' || inputs.models_to_download == 'all' || github.event_name == 'schedule') + run: | + if [ -z "$( ls -A ${{ env.MODELS_PATH }}/intel )" ]; then + echo "Open Model Zoo models not downloaded correctly - directory not created ❌" + echo "Open Model Zoo models not downloaded - directory not created ❌" >> $GITHUB_STEP_SUMMARY + exit 1 + else + echo "Open Model Zoo models directory created" + echo "Open Model Zoo models directory created" >> $GITHUB_STEP_SUMMARY + DIR_SIZE_MB=$(du -sm "${{ env.MODELS_PATH }}/intel" | cut -f1) + echo "Open Model Zoo models directory size: ${DIR_SIZE_MB} MB" + echo "Open Model Zoo models directory size: ${DIR_SIZE_MB} MB" >> $GITHUB_STEP_SUMMARY + if [ "$DIR_SIZE_MB" -ge "${{ env.MODELS_DIR_MIN_SIZE_MB }}" ]; then + echo "Open Model Zoo models downloaded ✅" + echo "Open Model Zoo models downloaded ✅" >> $GITHUB_STEP_SUMMARY + else + Open Model Zoo models not downloaded correctly - directory too small ❌" + Open Model Zoo models not downloaded correctly - directory too small ❌" >> $GITHUB_STEP_SUMMARY + fi + fi + + - name: Manage public models directories after success + if: ${{ steps.verify-public.outcome == 'success' && steps.download-public.outcome == 'success' }} + run: | + [ -d "${{ env.MODELS_PATH }}/public_old_old" ] && rm -rf "${{ env.MODELS_PATH }}/public_old_old" + echo "New public models set up ✅" >> $GITHUB_STEP_SUMMARY + + - name: Manage OMZ models directories after success + 
if: ${{ steps.verify-omz.outcome == 'success' && steps.download-omz.outcome == 'success' }} + run: | + [ -d "${{ env.MODELS_PATH }}/intel_old_old" ] && rm -rf "${{ env.MODELS_PATH }}/intel_old_old" + echo "New Open Model Zoo models set up ✅" >> $GITHUB_STEP_SUMMARY + + - name: Restore public models directories after fail + if: ${{ always() && steps.download-public.outcome != 'skipped' && !(steps.verify-public.outcome == 'success' && steps.download-public.outcome == 'success') }} + run: | + [ -d "${{ env.MODELS_PATH }}/public" ] && rm -rf "${{ env.MODELS_PATH }}/public" + [ -d "${{ env.MODELS_PATH }}/public_old" ] && mv "${{ env.MODELS_PATH }}/public_old" "${{ env.MODELS_PATH }}/public" + [ -d "${{ env.MODELS_PATH }}/public_old_old" ] && mv "${{ env.MODELS_PATH }}/public_old_old" "${{ env.MODELS_PATH }}/public_old" + echo "Old public models restored" >> $GITHUB_STEP_SUMMARY + + - name: Restore OMZ models directories after fail + if: ${{ always() && steps.download-omz.outcome != 'skipped' && !(steps.verify-omz.outcome == 'success' && steps.download-omz.outcome == 'success') }} + run: | + [ -d "${{ env.MODELS_PATH }}/intel" ] && rm -rf "${{ env.MODELS_PATH }}/intel" + [ -d "${{ env.MODELS_PATH }}/intel_old" ] && mv "${{ env.MODELS_PATH }}/intel_old" "${{ env.MODELS_PATH }}/intel" + [ -d "${{ env.MODELS_PATH }}/intel_old_old" ] && mv "${{ env.MODELS_PATH }}/intel_old_old" "${{ env.MODELS_PATH }}/intel_old" + echo "Old Open Model Zoo models restored" >> $GITHUB_STEP_SUMMARY + + - name: Clean up + if: always() + run: | + rm -rf dlstreamer-repo .virtualenvs + rm -rf $HOME/.virtualenvs diff --git a/.github/workflows/dls-pr-workflow.yaml b/.github/workflows/dls-pr-workflow.yaml index 6091b4de..14522ad6 100644 --- a/.github/workflows/dls-pr-workflow.yaml +++ b/.github/workflows/dls-pr-workflow.yaml @@ -1,416 +1,416 @@ -name: "[DLS] PR workflow" -run-name: "[DLS] PR workflow (by @${{ github.actor }} via ${{ github.event_name }})" -on: - push: - branches: - - 'main' - paths: - 
- 'cmake/**' - - 'docker/**' - - 'include/**' - - 'python/**' - - 'samples/**' - - 'scripts/**' - - 'src/**' - - 'tests/**' - - 'thirdparty/**' - pull_request: - paths: - - 'cmake/**' - - 'docker/**' - - 'include/**' - - 'python/**' - - 'samples/**' - - 'scripts/**' - - 'src/**' - - 'tests/**' - - 'thirdparty/**' - workflow_call: - inputs: - test-repo-branch: - description: "Branch in dl-streamer-tests repo (default is main)" - required: false - type: string - default: "main" - workflow_dispatch: - inputs: - test-repo-branch: - description: "Branch in dl-streamer-tests repo (default is main)" - required: false - type: string - default: "main" -permissions: {} - -jobs: - # ------------------------------------------------------------- SCANs ------------------------------------------------------------- - dls-code-style: - permissions: - contents: read - name: "DLS SCAN: code-style" - runs-on: ubuntu-latest - steps: - - name: Check out dlstreamer repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 - with: - persist-credentials: false - path: dlstreamer-repo - - - name: Remove thirdparty folder - run: | - rm -rf dlstreamer-repo/thirdparty - rm -rf thirdparty - - - name: Code-style action - uses: ./dlstreamer-repo/.github/actions/common/code-style - with: - target_dir: "dlstreamer-repo" - name: 'DLS_code-style-check-report' - fail-on-findings: true - - - name: Clean up - run: | - sudo rm -rf dlstreamer-repo - - dls-check-license-headers: - permissions: - contents: read - name: "DLS SCAN: check license headers" - runs-on: ubuntu-latest - steps: - - name: Check out dlstreamer repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 - with: - persist-credentials: false - fetch-depth: 0 - path: dlstreamer-repo - - - name: Check license headers - uses: ./dlstreamer-repo/.github/actions/common/license-namespace-checker - with: - name: 'DLS_license-check-report' - path: 'dlstreamer-repo' - fail-on-findings: true - - - 
name: Clean up - run: | - sudo rm -rf dlstreamer-repo - - # dls-static-code-analysis: - # permissions: - # security-events: write - # actions: read - # contents: read - # packages: read - # name: "DLS SCAN: static code analysis" - # uses: dlstreamer-repo/.github/workflows/codeql.yaml - - dls-static-code-analysis-for-c-cpp: - permissions: - security-events: write - actions: read - contents: read - packages: read - name: "DLS SCAN: static C/C++ code analysis" - uses: ./.github/workflows/dls-coverity.yaml - secrets: - DLS_COVERITY_TOKEN: ${{ secrets.DLS_COVERITY_TOKEN }} - DLS_COVERITY_EMAIL: ${{ secrets.DLS_COVERITY_EMAIL }} - DLS_COVERITY_PROJECT: ${{ secrets.DLS_COVERITY_PROJECT }} - - dls-filter-docker-related-changes: - permissions: - contents: read - name: "DLS SCAN: detect changes in docker dir" - runs-on: ubuntu-latest - outputs: - docker_changed: ${{ steps.check.outputs.docker_changed }} - steps: - - name: Check out dlstreamer repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 - with: - persist-credentials: false - fetch-depth: 0 - path: dlstreamer-repo - - - name: Fetch main branch for comparison - run: | - cd dlstreamer-repo - git fetch origin main - - - name: Detect changes in docker directory - id: check - run: | - echo "🔍 Checking for changes in 'docker/'..." - cd dlstreamer-repo - CHANGED_FILES=$(git diff --name-only origin/main...HEAD -- 'docker/') - - if [ -n "${CHANGED_FILES}" ]; then - echo "📄 Changed Docker-related files:" - echo "${CHANGED_FILES}" - echo "docker_changed=true" >> "$GITHUB_OUTPUT" - echo "🟡 Docker-related changes detected." - else - echo "✅ No docker-related changes." 
- echo "docker_changed=false" >> "$GITHUB_OUTPUT" - fi - - - name: Clean up - run: | - sudo rm -rf dlstreamer-repo - - dls-trivy-config-scan: - permissions: - contents: read - needs: [dls-filter-docker-related-changes] - if: needs.dls-filter-docker-related-changes.outputs.docker_changed == 'true' - name: "DLS SCAN: Trivy ${{ matrix.name }}" - strategy: - fail-fast: false - matrix: - include: - - name: dlstreamer_ubuntu22 - path: dlstreamer-repo/docker/ubuntu/ubuntu22.Dockerfile - output: reports/trivy-DLS_ubuntu22.json - - name: dlstreamer_ubuntu24 - path: dlstreamer-repo/docker/ubuntu/ubuntu24.Dockerfile - output: reports/trivy-DLS_ubuntu24.json - - name: dlstreamer_fedora41 - path: dlstreamer-repo/docker/fedora41/fedora41.Dockerfile - output: reports/trivy-DLS_fedora41.json - - name: dlstreamer-testing-ubuntu - path: dlstreamer-repo/docker/ubuntu/ubuntu-testing.Dockerfile - output: reports/trivy-DLS-testing-ubuntu.json - uses: ./.github/workflows/trivy-config-mode.yaml - with: - dockerfile-path: ${{ matrix.path }} - trivy-report-format: 'json' - severity-levels: 'HIGH,CRITICAL' - output-report-path: ${{ matrix.output }} - name: DLS_${{ matrix.name }} - - dls-hadolint: - permissions: - contents: read - pull-requests: write - needs: [dls-filter-docker-related-changes] - if: needs.dls-filter-docker-related-changes.outputs.docker_changed == 'true' - name: "DLS SCAN: Hadolint" - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - include: - - name: dlstreamer_ubuntu22 - dockerfile: dlstreamer-repo/docker/ubuntu/ubuntu22.Dockerfile - output-file: hadolint-dlstreamer_ubuntu22.json - - name: dlstreamer_ubuntu24 - dockerfile: dlstreamer-repo/docker/ubuntu/ubuntu24.Dockerfile - output-file: hadolint-dlstreamer_ubuntu24.json - - name: dlstreamer_fedora41 - dockerfile: dlstreamer-repo/docker/fedora41/fedora41.Dockerfile - output-file: hadolint-dlstreamer_fedora41.json - - name: dlstreamer-testing-ubuntu - dockerfile: 
dlstreamer-repo/docker/ubuntu/ubuntu-testing.Dockerfile - output-file: hadolint-dlstreamer-testing-ubuntu.json - steps: - - name: Check out dlstreamer repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 - with: - persist-credentials: false - path: dlstreamer-repo - - - name: Run Hadolint Action - uses: ./dlstreamer-repo/.github/actions/common/hadolint - with: - dockerfile: ${{ matrix.dockerfile }} - output-file: ${{ matrix.output-file }} - name: DLS_${{ matrix.name }} - enable-reviewdog: true - github_token: ${{ secrets.GITHUB_TOKEN }} - fail-on-findings: true - - - name: Clean up - run: | - sudo rm -rf dlstreamer-repo - - dls-pylint: - permissions: - contents: read - pull-requests: write - name: "DLS SCAN: pylint" - runs-on: ubuntu-latest - steps: - - name: Check out dlstreamer repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 - with: - persist-credentials: false - path: dlstreamer-repo - - - name: Run pylint - uses: ./dlstreamer-repo/.github/actions/common/pylint - with: - path: dlstreamer-repo - output-file: pylint-report.txt - name: DLS_pylint - enable-reviewdog: true - github_token: ${{ secrets.GITHUB_TOKEN }} - fail-on-findings: true - - - name: Clean up - run: | - sudo rm -rf dlstreamer-repo - - dls-shellcheck: - permissions: - contents: read - pull-requests: write - name: "DLS SCAN: shellcheck" - runs-on: ubuntu-latest - steps: - - name: Check out dlstreamer repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 - with: - persist-credentials: false - fetch-depth: 0 - - - name: Run shellcheck - uses: ./.github/actions/common/shellcheck - with: - path: . 
- output-file: shellcheck-report.txt - name: DLS_shellcheck - enable-reviewdog: true - github_token: ${{ secrets.GITHUB_TOKEN }} - fail-on-findings: true - - dls-yamllint: - permissions: - contents: read - pull-requests: write - name: "DLS SCAN: yamllint" - runs-on: ubuntu-latest - steps: - - name: Check out dlstreamer repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 - with: - persist-credentials: false - path: dlstreamer-repo - - - name: Run yamlint - uses: ./dlstreamer-repo/.github/actions/common/yamllint - with: - path: dlstreamer-repo - output-file: yamllint-report.txt - name: DLS_yamlint - enable-reviewdog: true - github_token: ${{ secrets.GITHUB_TOKEN }} - fail-on-findings: true - - - name: Clean up - run: | - sudo rm -rf dlstreamer-repo - - dls-clamav: - permissions: - contents: read - name: "DLS SCAN: ClamAV antivirus" - runs-on: ubuntu-latest - steps: - - name: Check out dlstreamer repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 - with: - persist-credentials: false - path: dlstreamer-repo - - - name: ClamAV scan - id: clamav-dls-scan - uses: open-edge-platform/orch-ci/.github/actions/security/clamav@76700c2fb6d547733b9218d9638dca43f5296399 # 0.1.52 - with: - scan-scope: all - paths: dlstreamer-repo - report_suffix: "DLS_ClamAV_antivirus_report" - fail-on-findings: true - - - name: Analyze ClamAV results - if: always() - env: - REPORT_PATH: ${{ steps.clamav-dls-scan.outputs.report_path }} - run: | - if [ -n "$REPORT_PATH" ] && [ -f "$REPORT_PATH" ]; then - echo "📄 Found ClamAV report: $REPORT_PATH" - - # Extract scan summary using jq - files_scanned=$(jq -r '.scan_summary.files_scanned // 0' "$REPORT_PATH" 2>/dev/null || echo "0") - threats_found=$(jq -r '.scan_summary.threats_found // 0' "$REPORT_PATH" 2>/dev/null || echo "0") - - echo "### ClamAV Antivirus Scan Results" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "- 📁 **Files scanned**: $files_scanned" >> 
$GITHUB_STEP_SUMMARY - echo "- đŸĻ  **Threats found**: $threats_found" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - - if [ "$threats_found" -gt 0 ]; then - echo "❌ **Security Alert**: Malware or threats detected!" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "âš ī¸ **Action Required**: Review the ClamAV report artifact for details." >> $GITHUB_STEP_SUMMARY - else - echo "✅ **All files are clean - no threats detected!**" >> $GITHUB_STEP_SUMMARY - fi - else - echo "### ClamAV Antivirus Scan Results" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "âš ī¸ **ClamAV report not found at path: ${REPORT_PATH:-not provided}**" >> $GITHUB_STEP_SUMMARY - fi - - - name: Clean up - if: always() - run: | - sudo rm -rf dlstreamer-repo - - dls-bandit: - permissions: - contents: read - name: "DLS SCAN: Bandit" - runs-on: ubuntu-latest - steps: - - name: Check out dlstreamer repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 - with: - persist-credentials: false - - - name: Run Bandit scan - uses: open-edge-platform/orch-ci/.github/actions/security/bandit@76700c2fb6d547733b9218d9638dca43f5296399 # 0.1.52 - with: - scan-scope: "changed" - severity-level: "HIGH" - confidence-level: "HIGH" - output-format: "txt" - fail-on-findings: true - paths: . 
- report_suffix: dlstreamer - - # -------------------------------------------------------- BUILDS & TESTS --------------------------------------------------------- - dls-build-dev-images-and-run-unit-tests: - permissions: - contents: read - packages: read - name: "DLS BUILD: dev imgs & run unit tests" - uses: ./.github/workflows/dls-build-dev-docker-images-and-run-unit.yaml - - dls-build-and-test-deb-and-deb_img: - permissions: - contents: read - packages: read - name: "DLS BUILD & TEST: .deb pkgs & img" - uses: ./.github/workflows/dls-build-and-test-deb_pkgs-and-deb_imgs.yaml - with: - test-repo-branch: ${{ inputs.test-repo-branch }} - - dls-build-and-test-windows: - permissions: - contents: read - packages: read - name: "DLS BUILD & TEST: Windows DLLs" - uses: ./.github/workflows/dls-build-and-test-windows.yaml - with: - test-repo-branch: ${{ inputs.test-repo-branch }} +name: "[DLS] PR workflow" +run-name: "[DLS] PR workflow (by @${{ github.actor }} via ${{ github.event_name }})" +on: + push: + branches: + - 'main' + paths: + - 'cmake/**' + - 'docker/**' + - 'include/**' + - 'python/**' + - 'samples/**' + - 'scripts/**' + - 'src/**' + - 'tests/**' + - 'thirdparty/**' + pull_request: + paths: + - 'cmake/**' + - 'docker/**' + - 'include/**' + - 'python/**' + - 'samples/**' + - 'scripts/**' + - 'src/**' + - 'tests/**' + - 'thirdparty/**' + workflow_call: + inputs: + test-repo-branch: + description: "Branch in dl-streamer-tests repo (default is main)" + required: false + type: string + default: "main" + workflow_dispatch: + inputs: + test-repo-branch: + description: "Branch in dl-streamer-tests repo (default is main)" + required: false + type: string + default: "main" +permissions: {} + +jobs: + # ------------------------------------------------------------- SCANs ------------------------------------------------------------- + dls-code-style: + permissions: + contents: read + name: "DLS SCAN: code-style" + runs-on: ubuntu-latest + steps: + - name: Check out 
dlstreamer repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 + with: + persist-credentials: false + path: dlstreamer-repo + + - name: Remove thirdparty folder + run: | + rm -rf dlstreamer-repo/thirdparty + rm -rf thirdparty + + - name: Code-style action + uses: ./dlstreamer-repo/.github/actions/common/code-style + with: + target_dir: "dlstreamer-repo" + name: 'DLS_code-style-check-report' + fail-on-findings: true + + - name: Clean up + run: | + sudo rm -rf dlstreamer-repo + + dls-check-license-headers: + permissions: + contents: read + name: "DLS SCAN: check license headers" + runs-on: ubuntu-latest + steps: + - name: Check out dlstreamer repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 + with: + persist-credentials: false + fetch-depth: 0 + path: dlstreamer-repo + + - name: Check license headers + uses: ./dlstreamer-repo/.github/actions/common/license-namespace-checker + with: + name: 'DLS_license-check-report' + path: 'dlstreamer-repo' + fail-on-findings: true + + - name: Clean up + run: | + sudo rm -rf dlstreamer-repo + + # dls-static-code-analysis: + # permissions: + # security-events: write + # actions: read + # contents: read + # packages: read + # name: "DLS SCAN: static code analysis" + # uses: dlstreamer-repo/.github/workflows/codeql.yaml + + dls-static-code-analysis-for-c-cpp: + permissions: + security-events: write + actions: read + contents: read + packages: read + name: "DLS SCAN: static C/C++ code analysis" + uses: ./.github/workflows/dls-coverity.yaml + secrets: + DLS_COVERITY_TOKEN: ${{ secrets.DLS_COVERITY_TOKEN }} + DLS_COVERITY_EMAIL: ${{ secrets.DLS_COVERITY_EMAIL }} + DLS_COVERITY_PROJECT: ${{ secrets.DLS_COVERITY_PROJECT }} + + dls-filter-docker-related-changes: + permissions: + contents: read + name: "DLS SCAN: detect changes in docker dir" + runs-on: ubuntu-latest + outputs: + docker_changed: ${{ steps.check.outputs.docker_changed }} + steps: + - name: Check out dlstreamer 
repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 + with: + persist-credentials: false + fetch-depth: 0 + path: dlstreamer-repo + + - name: Fetch main branch for comparison + run: | + cd dlstreamer-repo + git fetch origin main + + - name: Detect changes in docker directory + id: check + run: | + echo "🔍 Checking for changes in 'docker/'..." + cd dlstreamer-repo + CHANGED_FILES=$(git diff --name-only origin/main...HEAD -- 'docker/') + + if [ -n "${CHANGED_FILES}" ]; then + echo "📄 Changed Docker-related files:" + echo "${CHANGED_FILES}" + echo "docker_changed=true" >> "$GITHUB_OUTPUT" + echo "🟡 Docker-related changes detected." + else + echo "✅ No docker-related changes." + echo "docker_changed=false" >> "$GITHUB_OUTPUT" + fi + + - name: Clean up + run: | + sudo rm -rf dlstreamer-repo + + dls-trivy-config-scan: + permissions: + contents: read + needs: [dls-filter-docker-related-changes] + if: needs.dls-filter-docker-related-changes.outputs.docker_changed == 'true' + name: "DLS SCAN: Trivy ${{ matrix.name }}" + strategy: + fail-fast: false + matrix: + include: + - name: dlstreamer_ubuntu22 + path: dlstreamer-repo/docker/ubuntu/ubuntu22.Dockerfile + output: reports/trivy-DLS_ubuntu22.json + - name: dlstreamer_ubuntu24 + path: dlstreamer-repo/docker/ubuntu/ubuntu24.Dockerfile + output: reports/trivy-DLS_ubuntu24.json + - name: dlstreamer_fedora41 + path: dlstreamer-repo/docker/fedora41/fedora41.Dockerfile + output: reports/trivy-DLS_fedora41.json + - name: dlstreamer-testing-ubuntu + path: dlstreamer-repo/docker/ubuntu/ubuntu-testing.Dockerfile + output: reports/trivy-DLS-testing-ubuntu.json + uses: ./.github/workflows/trivy-config-mode.yaml + with: + dockerfile-path: ${{ matrix.path }} + trivy-report-format: 'json' + severity-levels: 'HIGH,CRITICAL' + output-report-path: ${{ matrix.output }} + name: DLS_${{ matrix.name }} + + dls-hadolint: + permissions: + contents: read + pull-requests: write + needs: 
[dls-filter-docker-related-changes] + if: needs.dls-filter-docker-related-changes.outputs.docker_changed == 'true' + name: "DLS SCAN: Hadolint" + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + include: + - name: dlstreamer_ubuntu22 + dockerfile: dlstreamer-repo/docker/ubuntu/ubuntu22.Dockerfile + output-file: hadolint-dlstreamer_ubuntu22.json + - name: dlstreamer_ubuntu24 + dockerfile: dlstreamer-repo/docker/ubuntu/ubuntu24.Dockerfile + output-file: hadolint-dlstreamer_ubuntu24.json + - name: dlstreamer_fedora41 + dockerfile: dlstreamer-repo/docker/fedora41/fedora41.Dockerfile + output-file: hadolint-dlstreamer_fedora41.json + - name: dlstreamer-testing-ubuntu + dockerfile: dlstreamer-repo/docker/ubuntu/ubuntu-testing.Dockerfile + output-file: hadolint-dlstreamer-testing-ubuntu.json + steps: + - name: Check out dlstreamer repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 + with: + persist-credentials: false + path: dlstreamer-repo + + - name: Run Hadolint Action + uses: ./dlstreamer-repo/.github/actions/common/hadolint + with: + dockerfile: ${{ matrix.dockerfile }} + output-file: ${{ matrix.output-file }} + name: DLS_${{ matrix.name }} + enable-reviewdog: true + github_token: ${{ secrets.GITHUB_TOKEN }} + fail-on-findings: true + + - name: Clean up + run: | + sudo rm -rf dlstreamer-repo + + dls-pylint: + permissions: + contents: read + pull-requests: write + name: "DLS SCAN: pylint" + runs-on: ubuntu-latest + steps: + - name: Check out dlstreamer repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 + with: + persist-credentials: false + path: dlstreamer-repo + + - name: Run pylint + uses: ./dlstreamer-repo/.github/actions/common/pylint + with: + path: dlstreamer-repo + output-file: pylint-report.txt + name: DLS_pylint + enable-reviewdog: true + github_token: ${{ secrets.GITHUB_TOKEN }} + fail-on-findings: true + + - name: Clean up + run: | + sudo rm -rf dlstreamer-repo + + 
dls-shellcheck: + permissions: + contents: read + pull-requests: write + name: "DLS SCAN: shellcheck" + runs-on: ubuntu-latest + steps: + - name: Check out dlstreamer repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 + with: + persist-credentials: false + fetch-depth: 0 + + - name: Run shellcheck + uses: ./.github/actions/common/shellcheck + with: + path: . + output-file: shellcheck-report.txt + name: DLS_shellcheck + enable-reviewdog: true + github_token: ${{ secrets.GITHUB_TOKEN }} + fail-on-findings: true + + dls-yamllint: + permissions: + contents: read + pull-requests: write + name: "DLS SCAN: yamllint" + runs-on: ubuntu-latest + steps: + - name: Check out dlstreamer repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 + with: + persist-credentials: false + path: dlstreamer-repo + + - name: Run yamlint + uses: ./dlstreamer-repo/.github/actions/common/yamllint + with: + path: dlstreamer-repo + output-file: yamllint-report.txt + name: DLS_yamlint + enable-reviewdog: true + github_token: ${{ secrets.GITHUB_TOKEN }} + fail-on-findings: true + + - name: Clean up + run: | + sudo rm -rf dlstreamer-repo + + dls-clamav: + permissions: + contents: read + name: "DLS SCAN: ClamAV antivirus" + runs-on: ubuntu-latest + steps: + - name: Check out dlstreamer repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 + with: + persist-credentials: false + path: dlstreamer-repo + + - name: ClamAV scan + id: clamav-dls-scan + uses: open-edge-platform/orch-ci/.github/actions/security/clamav@76700c2fb6d547733b9218d9638dca43f5296399 # 0.1.52 + with: + scan-scope: all + paths: dlstreamer-repo + report_suffix: "DLS_ClamAV_antivirus_report" + fail-on-findings: true + + - name: Analyze ClamAV results + if: always() + env: + REPORT_PATH: ${{ steps.clamav-dls-scan.outputs.report_path }} + run: | + if [ -n "$REPORT_PATH" ] && [ -f "$REPORT_PATH" ]; then + echo "📄 Found ClamAV report: $REPORT_PATH" + 
+ # Extract scan summary using jq + files_scanned=$(jq -r '.scan_summary.files_scanned // 0' "$REPORT_PATH" 2>/dev/null || echo "0") + threats_found=$(jq -r '.scan_summary.threats_found // 0' "$REPORT_PATH" 2>/dev/null || echo "0") + + echo "### ClamAV Antivirus Scan Results" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "- 📁 **Files scanned**: $files_scanned" >> $GITHUB_STEP_SUMMARY + echo "- đŸĻ  **Threats found**: $threats_found" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + + if [ "$threats_found" -gt 0 ]; then + echo "❌ **Security Alert**: Malware or threats detected!" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "âš ī¸ **Action Required**: Review the ClamAV report artifact for details." >> $GITHUB_STEP_SUMMARY + else + echo "✅ **All files are clean - no threats detected!**" >> $GITHUB_STEP_SUMMARY + fi + else + echo "### ClamAV Antivirus Scan Results" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "âš ī¸ **ClamAV report not found at path: ${REPORT_PATH:-not provided}**" >> $GITHUB_STEP_SUMMARY + fi + + - name: Clean up + if: always() + run: | + sudo rm -rf dlstreamer-repo + + dls-bandit: + permissions: + contents: read + name: "DLS SCAN: Bandit" + runs-on: ubuntu-latest + steps: + - name: Check out dlstreamer repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 + with: + persist-credentials: false + + - name: Run Bandit scan + uses: open-edge-platform/orch-ci/.github/actions/security/bandit@76700c2fb6d547733b9218d9638dca43f5296399 # 0.1.52 + with: + scan-scope: "changed" + severity-level: "HIGH" + confidence-level: "HIGH" + output-format: "txt" + fail-on-findings: true + paths: . 
+ report_suffix: dlstreamer + + # -------------------------------------------------------- BUILDS & TESTS --------------------------------------------------------- + dls-build-dev-images-and-run-unit-tests: + permissions: + contents: read + packages: read + name: "DLS BUILD: dev imgs & run unit tests" + uses: ./.github/workflows/dls-build-dev-docker-images-and-run-unit.yaml + + dls-build-and-test-deb-and-deb_img: + permissions: + contents: read + packages: read + name: "DLS BUILD & TEST: .deb pkgs & img" + uses: ./.github/workflows/dls-build-and-test-deb_pkgs-and-deb_imgs.yaml + with: + test-repo-branch: ${{ inputs.test-repo-branch }} + + dls-build-and-test-windows: + permissions: + contents: read + packages: read + name: "DLS BUILD & TEST: Windows DLLs" + uses: ./.github/workflows/dls-build-and-test-windows.yaml + with: + test-repo-branch: ${{ inputs.test-repo-branch }} diff --git a/.github/workflows/dls-weekly-cached-images.yaml b/.github/workflows/dls-weekly-cached-images.yaml index 3c51a19e..05e0f760 100644 --- a/.github/workflows/dls-weekly-cached-images.yaml +++ b/.github/workflows/dls-weekly-cached-images.yaml @@ -1,463 +1,463 @@ -name: "[DLS] [U22/U24] Build weekly cache imgs" -run-name: "[DLS] [U22/U24] Build weekly cache imgs" -on: - schedule: - - cron: '0 5 * * MON' # 5:00 UTC each Monday - workflow_dispatch: - inputs: - image-tag: - description: 'Image tag' - required: false - type: string - action-type: - description: 'Choose if this run is weekly build or cache update' - required: true - type: choice - options: - - 'weekly' - - 'cache' -permissions: {} -env: - dlstreamer-version: "2025.2.0" - DLS_REL_PATH: "./dlstreamer-repo" - -jobs: - build-dls-deb-img: - name: Build DLS ${{ matrix.ubuntu_version }} .deb and deb img - runs-on: ubuntu-latest - permissions: - contents: read - packages: write - id-token: write - strategy: - fail-fast: false - matrix: - include: - - path_dockerfile: ./dlstreamer-repo/docker/ubuntu/ubuntu22.Dockerfile - ubuntu_version: 
ubuntu22 - - path_dockerfile: ./dlstreamer-repo/docker/ubuntu/ubuntu24.Dockerfile - ubuntu_version: ubuntu24 - outputs: - ubuntu22_image: ${{ steps.save-image-ubuntu22.outputs.image }} - ubuntu24_image: ${{ steps.save-image-ubuntu24.outputs.image }} - steps: - - name: Determine image tag (input or default) - id: set-tag - run: | - if [ -z "${{ github.event.inputs['image-tag'] }}" ]; then - echo "No image-tag provided — using default value with commit sha" - VALUE="${{ github.sha }}" - else - echo "Using provided image-tag: ${{ github.event.inputs['image-tag'] }}" - VALUE="${{ github.event.inputs['image-tag'] }}" - fi - echo "IMAGE_TAG=$VALUE" >> $GITHUB_ENV - - - name: Check out dlstreamer repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 - with: - persist-credentials: false - path: dlstreamer-repo - - - name: Init submodules - run: | - cd dlstreamer-repo - git submodule update --init thirdparty/spdlog - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 #3.11.1 - - - name: Log in to GitHub Container Registry - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef #3.6.0 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Build deb final img with cache from GHCR - env: - IMAGE_TAG: ${{ env.IMAGE_TAG }} - deb_final_img_cached: ghcr.io/${{ github.repository }}/deb-final-img-${{ matrix.ubuntu_version }}:buildcache - run: | - docker buildx build \ - --load \ - --target dlstreamer \ - --tag "ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${IMAGE_TAG}-${{ matrix.ubuntu_version }}" \ - --cache-to=type=registry,ref=${deb_final_img_cached},mode=max \ - --cache-from="${deb_final_img_cached}" \ - --build-arg DLSTREAMER_VERSION=${{ env.dlstreamer-version }} \ - --build-arg DLSTREAMER_BUILD_NUMBER=deb-pkg-${{ matrix.ubuntu_version }} \ - -f ${{ matrix.path_dockerfile }} \ - ${{ env.DLS_REL_PATH }} - - # 
======================================================== SCANNING PART ======================================================== - - name: 🔍 Scan Docker image with Trivy - uses: open-edge-platform/orch-ci/.github/actions/security/trivy@76700c2fb6d547733b9218d9638dca43f5296399 # 0.1.52 - with: - scan_target: "ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${{ env.IMAGE_TAG }}-${{ matrix.ubuntu_version }}" - severity: "HIGH" - scan_type: image - format: table - report_suffix: "-${{ matrix.ubuntu_version }}-deb-img" - scan-scope: all - timeout: 20m - ignore_unfixed: true - - - name: Fail if vulnerabilities > 0 in Trivy results - run: | - file=$(ls security-results/trivy/trivy-results-* | head -n 1) - cat $file - vuln_count=$(awk '/│/ && /Vulnerabilities/ {next} /│/ {gsub(/ /, "", $0); split($0, cols, "│"); print cols[4]}' "$file" | grep -v '-' | head -n 1) - echo "Found vulnerability count: $vuln_count" - if [[ "$vuln_count" != "0" ]]; then - echo "❌ Vulnerabilities found: $vuln_count" - exit 1 - else - echo "✅ No vulnerabilities found." 
- fi - shell: bash - - - name: Push deb final img to GHCR - if: ${{ inputs.action-type == 'weekly' }} - env: - IMAGE_TAG: ${{ env.IMAGE_TAG }} - run: | - docker push "ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${IMAGE_TAG}-${{ matrix.ubuntu_version }}" - - - name: Install Cosign - if: ${{ inputs.action-type == 'weekly' }} - uses: sigstore/cosign-installer@398d4b0eeef1380460a10c8013a76f728fb906ac # v3.9.1 - - - name: Install skopeo - if: ${{ inputs.action-type == 'weekly' }} - run: sudo apt update && sudo apt install -y skopeo jq - - - name: Get image digest - if: ${{ inputs.action-type == 'weekly' }} - id: digest - env: - IMAGE_TAG: ${{ env.IMAGE_TAG }} - run: | - IMAGE=ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${IMAGE_TAG}-${{ matrix.ubuntu_version }} - DIGEST=$(skopeo inspect docker://$IMAGE | jq -r '.Digest') - echo "digest=${DIGEST}" >> $GITHUB_OUTPUT - - - name: Sign Docker image using Cosign (keyless) - if: ${{ inputs.action-type == 'weekly' }} - env: - deb_final_img: ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer@${{ steps.digest.outputs.digest }} - run: | - cosign sign --yes ${deb_final_img} - - - name: Save Ubuntu 22 image info - id: save-image-ubuntu22 - if: ${{ matrix.ubuntu_version == 'ubuntu22' }} - env: - IMAGE_TAG: ${{ env.IMAGE_TAG }} - run: echo "image=ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${IMAGE_TAG}-${{ matrix.ubuntu_version }}" >> "$GITHUB_OUTPUT" - - - name: Save Ubuntu 24 image info - id: save-image-ubuntu24 - if: ${{ matrix.ubuntu_version == 'ubuntu24' }} - env: - IMAGE_TAG: ${{ env.IMAGE_TAG }} - run: echo "image=ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${IMAGE_TAG}-${{ matrix.ubuntu_version }}" >> "$GITHUB_OUTPUT" - - - name: Clean up - if: always() - env: - IMAGE_TAG: ${{ env.IMAGE_TAG }} - run: | - rm -rf dlstreamer-repo - docker rmi ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${IMAGE_TAG}-${{ matrix.ubuntu_version }} || true - - 
build-dls-dev-img: - name: Build DLS ${{ matrix.ubuntu_version }} dev debug img - runs-on: [dls, ubuntu] - permissions: - contents: read - packages: write - id-token: write - strategy: - fail-fast: false - matrix: - include: - - path_dockerfile: ./dlstreamer-repo/docker/ubuntu/ubuntu22.Dockerfile - ubuntu_version: ubuntu22 - build_arg: Debug - - path_dockerfile: ./dlstreamer-repo/docker/ubuntu/ubuntu24.Dockerfile - ubuntu_version: ubuntu24 - build_arg: Debug - steps: - - name: Determine image tag (input or default) - id: set-tag - run: | - if [ -z "${{ github.event.inputs['image-tag'] }}" ]; then - echo "No image-tag provided — using default value with commit sha" - VALUE="${{ github.sha }}" - else - echo "Using provided image-tag: ${{ github.event.inputs['image-tag'] }}" - VALUE="${{ github.event.inputs['image-tag'] }}" - fi - echo "IMAGE_TAG=$VALUE" >> $GITHUB_ENV - - - name: Check out dlstreamer repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 - with: - persist-credentials: false - path: dlstreamer-repo - - - name: Init submodules - run: | - cd dlstreamer-repo - git submodule update --init thirdparty/spdlog - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 #3.11.1 - - - name: Log in to GitHub Container Registry - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef #3.6.0 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Build dev debug img with cache from GHCR - env: - IMAGE_TAG: ${{ env.IMAGE_TAG }} - dev_debug_img_cached: ghcr.io/${{ github.repository }}/dev-debug-img-${{ matrix.ubuntu_version }}:buildcache - run: | - docker buildx build \ - --load \ - --target dlstreamer-dev \ - --tag "ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${IMAGE_TAG}-dev-${{ matrix.ubuntu_version }}" \ - --cache-from=type=registry,ref=${dev_debug_img_cached} \ - 
--cache-to=type=registry,ref=${dev_debug_img_cached},mode=max \ - --build-arg BUILD_ARG=${{ matrix.build_arg }} \ - -f ${{ matrix.path_dockerfile }} \ - ${{ env.DLS_REL_PATH }} - - # ======================================================== SCANNING PART ======================================================== - - name: Scan Docker image with Trivy - uses: open-edge-platform/orch-ci/.github/actions/security/trivy@76700c2fb6d547733b9218d9638dca43f5296399 # 0.1.52 - with: - scan_target: "ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${{ env.IMAGE_TAG }}-dev-${{ matrix.ubuntu_version }}" - severity: "HIGH" - scan_type: image - format: table - report_suffix: ${{ matrix.ubuntu_version }}-dev-img - scan-scope: all - timeout: 20m - ignore_unfixed: true - - - name: Fail if vulnerabilities > 0 in Trivy results - run: | - file=$(ls security-results/trivy/trivy-results-* | head -n 1) - cat $file - vuln_count=$(awk '/│/ && /Vulnerabilities/ {next} /│/ {gsub(/ /, "", $0); split($0, cols, "│"); print cols[4]}' "$file" | grep -v '-' | head -n 1) - echo "Found vulnerability count: $vuln_count" - if [[ "$vuln_count" != "0" ]]; then - echo "❌ Vulnerabilities found: $vuln_count" - exit 1 - else - echo "✅ No vulnerabilities found." 
- fi - shell: bash - - - name: Push dev-debug final img to GHCR - if: ${{ inputs.action-type == 'weekly' }} - env: - IMAGE_TAG: ${{ env.IMAGE_TAG }} - run: | - docker push "ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${IMAGE_TAG}-dev-${{ matrix.ubuntu_version }}" - - - name: Install Cosign - if: ${{ inputs.action-type == 'weekly' }} - uses: sigstore/cosign-installer@398d4b0eeef1380460a10c8013a76f728fb906ac # v3.9.1 - - - name: Install skopeo - if: ${{ inputs.action-type == 'weekly' }} - run: sudo apt update && sudo apt install -y skopeo jq - - - name: Get image digest - if: ${{ inputs.action-type == 'weekly' }} - id: digest - env: - IMAGE_TAG: ${{ env.IMAGE_TAG }} - run: | - IMAGE=ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${IMAGE_TAG}-dev-${{ matrix.ubuntu_version }} - DIGEST=$(skopeo inspect docker://$IMAGE | jq -r '.Digest') - echo "digest=${DIGEST}" >> $GITHUB_OUTPUT - - name: Sign Docker image using Cosign (keyless) - if: ${{ inputs.action-type == 'weekly' }} - env: - dev_debug_img: ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer@${{ steps.digest.outputs.digest }} - run: | - cosign sign --yes ${dev_debug_img} - - - name: Clean up - if: always() - env: - IMAGE_TAG: ${{ env.IMAGE_TAG }} - run: | - rm -rf dlstreamer-repo - docker rmi ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${IMAGE_TAG}-dev-${{ matrix.ubuntu_version }} || true - - - build-dls-pipeline-server-image: - if: ${{ inputs.action-type == 'weekly' }} - needs: build-dls-deb-img - name: Build DLS Pipeline Server ${{ matrix.ubuntu_version }} img - runs-on: ubuntu-latest - permissions: - contents: read - packages: write - id-token: write - strategy: - fail-fast: false - matrix: - ubuntu_version: [ubuntu22, ubuntu24] - steps: - - name: Determine image tag (input or default) - id: set-tag - run: | - if [ -z "${{ github.event.inputs['image-tag'] }}" ]; then - echo "No image-tag provided — using default value with commit sha" - VALUE="${{ github.sha }}" - 
else - echo "Using provided image-tag: ${{ github.event.inputs['image-tag'] }}" - VALUE="${{ github.event.inputs['image-tag'] }}" - fi - echo "IMAGE_TAG=$VALUE" >> $GITHUB_ENV - - - name: Check out edge-ai-libraries repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 - with: - repository: open-edge-platform/edge-ai-libraries - persist-credentials: false - path: edge-ai-libraries-repo - - - name: Log in to GitHub Container Registry - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef #3.6.0 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Set BASE_IMAGE - env: - BASE_IMAGE22: ${{ needs.build-dls-deb-img.outputs.ubuntu22_image }} - BASE_IMAGE24: ${{ needs.build-dls-deb-img.outputs.ubuntu24_image }} - run: | - if [ "${{ matrix.ubuntu_version }}" == "ubuntu22" ]; then - echo "BASE_IMAGE=${BASE_IMAGE22}" >> $GITHUB_ENV - elif [ "${{ matrix.ubuntu_version }}" == "ubuntu24" ]; then - echo "BASE_IMAGE=${BASE_IMAGE24}" >> $GITHUB_ENV - fi - - name: Build dls-pipeline-server-img - env: - IMAGE_TAG: ${{ env.IMAGE_TAG }} - run: | - cd edge-ai-libraries-repo/microservices/dlstreamer-pipeline-server/docker - export DLSTREAMER_PIPELINE_SERVER_IMAGE=ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:${IMAGE_TAG}-${{ matrix.ubuntu_version }} - export DLSTREAMER_PIPELINE_SERVER_DOCKERFILE=Dockerfile - docker compose build --no-cache --pull - export DLSTREAMER_PIPELINE_SERVER_IMAGE=ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:${IMAGE_TAG}-extended-${{ matrix.ubuntu_version }} - export BUILD_TARGET=dlstreamer-pipeline-server-extended - docker compose build --no-cache --pull - - # ======================================================== SCANNING PART ======================================================== - - name: Scan Docker image with Trivy - uses: 
open-edge-platform/orch-ci/.github/actions/security/trivy@76700c2fb6d547733b9218d9638dca43f5296399 # 0.1.52 - with: - scan_target: "ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:${{ env.IMAGE_TAG }}}-${{ matrix.ubuntu_version }}" - severity: "HIGH" - scan_type: image - format: table - report_suffix: ${{ matrix.ubuntu_version }}-edge-ai-dlstreamer-pipeline-server - scan-scope: all - timeout: 20m - ignore_unfixed: true - - name: Scan Docker Extended image with Trivy - uses: open-edge-platform/orch-ci/.github/actions/security/trivy@76700c2fb6d547733b9218d9638dca43f5296399 # 0.1.52 - with: - scan_target: "ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:${{ env.IMAGE_TAG }}-extended-${{ matrix.ubuntu_version }}" - severity: "HIGH" - scan_type: image - format: table - report_suffix: ${{ matrix.ubuntu_version }}-edge-ai-dlstreamer-pipeline-server-extended - scan-scope: all - timeout: 20m - ignore_unfixed: true - - - name: Fail if vulnerabilities > 0 in Trivy results - run: | - for file in security-results/trivy/trivy-results-*; do - echo "📄 Checking $file" - cat "$file" - vuln_count=$(awk '/│/ && /Vulnerabilities/ {next} /│/ {gsub(/ /, "", $0); split($0, cols, "│"); print cols[4]}' "$file" | grep -v '-' | head -n 1) - - echo "Found vulnerability count in $file: $vuln_count" - - if [[ "$vuln_count" != "0" ]]; then - echo "❌ Vulnerabilities found in $file: $vuln_count" - exit 1 - fi - done - echo "✅ No vulnerabilities found in any image." 
- shell: bash - - - name: Push Docker img - env: - IMAGE_TAG: ${{ env.IMAGE_TAG }} - run: | - if [ "${{ matrix.ubuntu_version }}" == "ubuntu24" ]; then - docker tag ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:${IMAGE_TAG}-${{ matrix.ubuntu_version }} ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:latest - docker push ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:latest - fi - docker push ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:${IMAGE_TAG}-${{ matrix.ubuntu_version }} - docker push ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:${IMAGE_TAG}-extended-${{ matrix.ubuntu_version }} - - name: Install Cosign - uses: sigstore/cosign-installer@398d4b0eeef1380460a10c8013a76f728fb906ac # v3.9.1 - - - name: Install skopeo - run: sudo apt update && sudo apt install -y skopeo jq - - - name: Get image digest - id: digest - env: - IMAGE_TAG: ${{ env.IMAGE_TAG }} - run: | - IMAGE=ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:${IMAGE_TAG}-${{ matrix.ubuntu_version }} - DIGEST=$(skopeo inspect docker://$IMAGE | jq -r '.Digest') - echo "digest=$DIGEST" >> $GITHUB_OUTPUT - IMAGE=ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:${IMAGE_TAG}-extended-${{ matrix.ubuntu_version }} - DIGEST=$(skopeo inspect docker://$IMAGE | jq -r '.Digest') - echo "digest_extended=$DIGEST" >> $GITHUB_OUTPUT - if [ "${{ matrix.ubuntu_version }}" == "ubuntu24" ]; then - DIGEST_LATEST=$(skopeo inspect docker://ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:latest | jq -r '.Digest') - echo "digest_latest=$DIGEST_LATEST" >> $GITHUB_OUTPUT - fi - - - name: Sign Docker image using Cosign (keyless) - env: - dlsps_img: ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server@${{ steps.digest.outputs.digest }} - dlsps_img_ext: ghcr.io/${{ github.repository 
}}/intel/edge-ai-dlstreamer-pipeline-server@${{ steps.digest.outputs.digest_extended }} - run: | - cosign sign --yes ${dlsps_img} - cosign sign --yes ${dlsps_img_ext} - - - name: Sign Docker image using Cosign (keyless) (latest) - if: ${{ matrix.ubuntu_version == 'ubuntu24' }} - env: - dlsps_img: ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server@${{ steps.digest.outputs.digest_latest }} - run: | - cosign sign --yes ${dlsps_img} - - name: Clean up - if: always() - env: - IMAGE_TAG: ${{ env.IMAGE_TAG }} - run: | - rm -rf edge-ai-libraries-repo - docker rmi ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:${IMAGE_TAG}-${{ matrix.ubuntu_version }} ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:latest || true - docker rmi ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:${IMAGE_TAG}-extended-${{ matrix.ubuntu_version }} || true +name: "[DLS] [U22/U24] Build weekly cache imgs" +run-name: "[DLS] [U22/U24] Build weekly cache imgs" +on: + schedule: + - cron: '0 5 * * MON' # 5:00 UTC each Monday + workflow_dispatch: + inputs: + image-tag: + description: 'Image tag' + required: false + type: string + action-type: + description: 'Choose if this run is weekly build or cache update' + required: true + type: choice + options: + - 'weekly' + - 'cache' +permissions: {} +env: + dlstreamer-version: "2025.2.0" + DLS_REL_PATH: "./dlstreamer-repo" + +jobs: + build-dls-deb-img: + name: Build DLS ${{ matrix.ubuntu_version }} .deb and deb img + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + id-token: write + strategy: + fail-fast: false + matrix: + include: + - path_dockerfile: ./dlstreamer-repo/docker/ubuntu/ubuntu22.Dockerfile + ubuntu_version: ubuntu22 + - path_dockerfile: ./dlstreamer-repo/docker/ubuntu/ubuntu24.Dockerfile + ubuntu_version: ubuntu24 + outputs: + ubuntu22_image: ${{ steps.save-image-ubuntu22.outputs.image }} + ubuntu24_image: ${{ 
steps.save-image-ubuntu24.outputs.image }} + steps: + - name: Determine image tag (input or default) + id: set-tag + run: | + if [ -z "${{ github.event.inputs['image-tag'] }}" ]; then + echo "No image-tag provided — using default value with commit sha" + VALUE="${{ github.sha }}" + else + echo "Using provided image-tag: ${{ github.event.inputs['image-tag'] }}" + VALUE="${{ github.event.inputs['image-tag'] }}" + fi + echo "IMAGE_TAG=$VALUE" >> $GITHUB_ENV + + - name: Check out dlstreamer repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 + with: + persist-credentials: false + path: dlstreamer-repo + + - name: Init submodules + run: | + cd dlstreamer-repo + git submodule update --init thirdparty/spdlog + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 #3.11.1 + + - name: Log in to GitHub Container Registry + uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef #3.6.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build deb final img with cache from GHCR + env: + IMAGE_TAG: ${{ env.IMAGE_TAG }} + deb_final_img_cached: ghcr.io/${{ github.repository }}/deb-final-img-${{ matrix.ubuntu_version }}:buildcache + run: | + docker buildx build \ + --load \ + --target dlstreamer \ + --tag "ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${IMAGE_TAG}-${{ matrix.ubuntu_version }}" \ + --cache-to=type=registry,ref=${deb_final_img_cached},mode=max \ + --cache-from="${deb_final_img_cached}" \ + --build-arg DLSTREAMER_VERSION=${{ env.dlstreamer-version }} \ + --build-arg DLSTREAMER_BUILD_NUMBER=deb-pkg-${{ matrix.ubuntu_version }} \ + -f ${{ matrix.path_dockerfile }} \ + ${{ env.DLS_REL_PATH }} + + # ======================================================== SCANNING PART ======================================================== + - name: 🔍 Scan Docker image with Trivy + uses: 
open-edge-platform/orch-ci/.github/actions/security/trivy@76700c2fb6d547733b9218d9638dca43f5296399 # 0.1.52 + with: + scan_target: "ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${{ env.IMAGE_TAG }}-${{ matrix.ubuntu_version }}" + severity: "HIGH" + scan_type: image + format: table + report_suffix: "-${{ matrix.ubuntu_version }}-deb-img" + scan-scope: all + timeout: 20m + ignore_unfixed: true + + - name: Fail if vulnerabilities > 0 in Trivy results + run: | + file=$(ls security-results/trivy/trivy-results-* | head -n 1) + cat $file + vuln_count=$(awk '/│/ && /Vulnerabilities/ {next} /│/ {gsub(/ /, "", $0); split($0, cols, "│"); print cols[4]}' "$file" | grep -v '-' | head -n 1) + echo "Found vulnerability count: $vuln_count" + if [[ "$vuln_count" != "0" ]]; then + echo "❌ Vulnerabilities found: $vuln_count" + exit 1 + else + echo "✅ No vulnerabilities found." + fi + shell: bash + + - name: Push deb final img to GHCR + if: ${{ inputs.action-type == 'weekly' }} + env: + IMAGE_TAG: ${{ env.IMAGE_TAG }} + run: | + docker push "ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${IMAGE_TAG}-${{ matrix.ubuntu_version }}" + + - name: Install Cosign + if: ${{ inputs.action-type == 'weekly' }} + uses: sigstore/cosign-installer@398d4b0eeef1380460a10c8013a76f728fb906ac # v3.9.1 + + - name: Install skopeo + if: ${{ inputs.action-type == 'weekly' }} + run: sudo apt update && sudo apt install -y skopeo jq + + - name: Get image digest + if: ${{ inputs.action-type == 'weekly' }} + id: digest + env: + IMAGE_TAG: ${{ env.IMAGE_TAG }} + run: | + IMAGE=ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${IMAGE_TAG}-${{ matrix.ubuntu_version }} + DIGEST=$(skopeo inspect docker://$IMAGE | jq -r '.Digest') + echo "digest=${DIGEST}" >> $GITHUB_OUTPUT + + - name: Sign Docker image using Cosign (keyless) + if: ${{ inputs.action-type == 'weekly' }} + env: + deb_final_img: ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer@${{ steps.digest.outputs.digest }} + 
run: | + cosign sign --yes ${deb_final_img} + + - name: Save Ubuntu 22 image info + id: save-image-ubuntu22 + if: ${{ matrix.ubuntu_version == 'ubuntu22' }} + env: + IMAGE_TAG: ${{ env.IMAGE_TAG }} + run: echo "image=ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${IMAGE_TAG}-${{ matrix.ubuntu_version }}" >> "$GITHUB_OUTPUT" + + - name: Save Ubuntu 24 image info + id: save-image-ubuntu24 + if: ${{ matrix.ubuntu_version == 'ubuntu24' }} + env: + IMAGE_TAG: ${{ env.IMAGE_TAG }} + run: echo "image=ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${IMAGE_TAG}-${{ matrix.ubuntu_version }}" >> "$GITHUB_OUTPUT" + + - name: Clean up + if: always() + env: + IMAGE_TAG: ${{ env.IMAGE_TAG }} + run: | + rm -rf dlstreamer-repo + docker rmi ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${IMAGE_TAG}-${{ matrix.ubuntu_version }} || true + + build-dls-dev-img: + name: Build DLS ${{ matrix.ubuntu_version }} dev debug img + runs-on: [dls, ubuntu] + permissions: + contents: read + packages: write + id-token: write + strategy: + fail-fast: false + matrix: + include: + - path_dockerfile: ./dlstreamer-repo/docker/ubuntu/ubuntu22.Dockerfile + ubuntu_version: ubuntu22 + build_arg: Debug + - path_dockerfile: ./dlstreamer-repo/docker/ubuntu/ubuntu24.Dockerfile + ubuntu_version: ubuntu24 + build_arg: Debug + steps: + - name: Determine image tag (input or default) + id: set-tag + run: | + if [ -z "${{ github.event.inputs['image-tag'] }}" ]; then + echo "No image-tag provided — using default value with commit sha" + VALUE="${{ github.sha }}" + else + echo "Using provided image-tag: ${{ github.event.inputs['image-tag'] }}" + VALUE="${{ github.event.inputs['image-tag'] }}" + fi + echo "IMAGE_TAG=$VALUE" >> $GITHUB_ENV + + - name: Check out dlstreamer repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 + with: + persist-credentials: false + path: dlstreamer-repo + + - name: Init submodules + run: | + cd dlstreamer-repo + git submodule 
update --init thirdparty/spdlog + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 #3.11.1 + + - name: Log in to GitHub Container Registry + uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef #3.6.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build dev debug img with cache from GHCR + env: + IMAGE_TAG: ${{ env.IMAGE_TAG }} + dev_debug_img_cached: ghcr.io/${{ github.repository }}/dev-debug-img-${{ matrix.ubuntu_version }}:buildcache + run: | + docker buildx build \ + --load \ + --target dlstreamer-dev \ + --tag "ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${IMAGE_TAG}-dev-${{ matrix.ubuntu_version }}" \ + --cache-from=type=registry,ref=${dev_debug_img_cached} \ + --cache-to=type=registry,ref=${dev_debug_img_cached},mode=max \ + --build-arg BUILD_ARG=${{ matrix.build_arg }} \ + -f ${{ matrix.path_dockerfile }} \ + ${{ env.DLS_REL_PATH }} + + # ======================================================== SCANNING PART ======================================================== + - name: Scan Docker image with Trivy + uses: open-edge-platform/orch-ci/.github/actions/security/trivy@76700c2fb6d547733b9218d9638dca43f5296399 # 0.1.52 + with: + scan_target: "ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${{ env.IMAGE_TAG }}-dev-${{ matrix.ubuntu_version }}" + severity: "HIGH" + scan_type: image + format: table + report_suffix: ${{ matrix.ubuntu_version }}-dev-img + scan-scope: all + timeout: 20m + ignore_unfixed: true + + - name: Fail if vulnerabilities > 0 in Trivy results + run: | + file=$(ls security-results/trivy/trivy-results-* | head -n 1) + cat $file + vuln_count=$(awk '/│/ && /Vulnerabilities/ {next} /│/ {gsub(/ /, "", $0); split($0, cols, "│"); print cols[4]}' "$file" | grep -v '-' | head -n 1) + echo "Found vulnerability count: $vuln_count" + if [[ "$vuln_count" != "0" ]]; then + echo "❌ Vulnerabilities 
found: $vuln_count" + exit 1 + else + echo "✅ No vulnerabilities found." + fi + shell: bash + + - name: Push dev-debug final img to GHCR + if: ${{ inputs.action-type == 'weekly' }} + env: + IMAGE_TAG: ${{ env.IMAGE_TAG }} + run: | + docker push "ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${IMAGE_TAG}-dev-${{ matrix.ubuntu_version }}" + + - name: Install Cosign + if: ${{ inputs.action-type == 'weekly' }} + uses: sigstore/cosign-installer@398d4b0eeef1380460a10c8013a76f728fb906ac # v3.9.1 + + - name: Install skopeo + if: ${{ inputs.action-type == 'weekly' }} + run: sudo apt update && sudo apt install -y skopeo jq + + - name: Get image digest + if: ${{ inputs.action-type == 'weekly' }} + id: digest + env: + IMAGE_TAG: ${{ env.IMAGE_TAG }} + run: | + IMAGE=ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${IMAGE_TAG}-dev-${{ matrix.ubuntu_version }} + DIGEST=$(skopeo inspect docker://$IMAGE | jq -r '.Digest') + echo "digest=${DIGEST}" >> $GITHUB_OUTPUT + - name: Sign Docker image using Cosign (keyless) + if: ${{ inputs.action-type == 'weekly' }} + env: + dev_debug_img: ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer@${{ steps.digest.outputs.digest }} + run: | + cosign sign --yes ${dev_debug_img} + + - name: Clean up + if: always() + env: + IMAGE_TAG: ${{ env.IMAGE_TAG }} + run: | + rm -rf dlstreamer-repo + docker rmi ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer:${IMAGE_TAG}-dev-${{ matrix.ubuntu_version }} || true + + + build-dls-pipeline-server-image: + if: ${{ inputs.action-type == 'weekly' }} + needs: build-dls-deb-img + name: Build DLS Pipeline Server ${{ matrix.ubuntu_version }} img + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + id-token: write + strategy: + fail-fast: false + matrix: + ubuntu_version: [ubuntu22, ubuntu24] + steps: + - name: Determine image tag (input or default) + id: set-tag + run: | + if [ -z "${{ github.event.inputs['image-tag'] }}" ]; then + echo "No image-tag 
provided — using default value with commit sha" + VALUE="${{ github.sha }}" + else + echo "Using provided image-tag: ${{ github.event.inputs['image-tag'] }}" + VALUE="${{ github.event.inputs['image-tag'] }}" + fi + echo "IMAGE_TAG=$VALUE" >> $GITHUB_ENV + + - name: Check out edge-ai-libraries repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 + with: + repository: open-edge-platform/edge-ai-libraries + persist-credentials: false + path: edge-ai-libraries-repo + + - name: Log in to GitHub Container Registry + uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef #3.6.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Set BASE_IMAGE + env: + BASE_IMAGE22: ${{ needs.build-dls-deb-img.outputs.ubuntu22_image }} + BASE_IMAGE24: ${{ needs.build-dls-deb-img.outputs.ubuntu24_image }} + run: | + if [ "${{ matrix.ubuntu_version }}" == "ubuntu22" ]; then + echo "BASE_IMAGE=${BASE_IMAGE22}" >> $GITHUB_ENV + elif [ "${{ matrix.ubuntu_version }}" == "ubuntu24" ]; then + echo "BASE_IMAGE=${BASE_IMAGE24}" >> $GITHUB_ENV + fi + - name: Build dls-pipeline-server-img + env: + IMAGE_TAG: ${{ env.IMAGE_TAG }} + run: | + cd edge-ai-libraries-repo/microservices/dlstreamer-pipeline-server/docker + export DLSTREAMER_PIPELINE_SERVER_IMAGE=ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:${IMAGE_TAG}-${{ matrix.ubuntu_version }} + export DLSTREAMER_PIPELINE_SERVER_DOCKERFILE=Dockerfile + docker compose build --no-cache --pull + export DLSTREAMER_PIPELINE_SERVER_IMAGE=ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:${IMAGE_TAG}-extended-${{ matrix.ubuntu_version }} + export BUILD_TARGET=dlstreamer-pipeline-server-extended + docker compose build --no-cache --pull + + # ======================================================== SCANNING PART ======================================================== + - name: Scan Docker image with Trivy + 
uses: open-edge-platform/orch-ci/.github/actions/security/trivy@76700c2fb6d547733b9218d9638dca43f5296399 # 0.1.52 + with: + scan_target: "ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:${{ env.IMAGE_TAG }}-${{ matrix.ubuntu_version }}" + severity: "HIGH" + scan_type: image + format: table + report_suffix: ${{ matrix.ubuntu_version }}-edge-ai-dlstreamer-pipeline-server + scan-scope: all + timeout: 20m + ignore_unfixed: true + - name: Scan Docker Extended image with Trivy + uses: open-edge-platform/orch-ci/.github/actions/security/trivy@76700c2fb6d547733b9218d9638dca43f5296399 # 0.1.52 + with: + scan_target: "ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:${{ env.IMAGE_TAG }}-extended-${{ matrix.ubuntu_version }}" + severity: "HIGH" + scan_type: image + format: table + report_suffix: ${{ matrix.ubuntu_version }}-edge-ai-dlstreamer-pipeline-server-extended + scan-scope: all + timeout: 20m + ignore_unfixed: true + + - name: Fail if vulnerabilities > 0 in Trivy results + run: | + for file in security-results/trivy/trivy-results-*; do + echo "📄 Checking $file" + cat "$file" + vuln_count=$(awk '/│/ && /Vulnerabilities/ {next} /│/ {gsub(/ /, "", $0); split($0, cols, "│"); print cols[4]}' "$file" | grep -v '-' | head -n 1) + + echo "Found vulnerability count in $file: $vuln_count" + + if [[ "$vuln_count" != "0" ]]; then + echo "❌ Vulnerabilities found in $file: $vuln_count" + exit 1 + fi + done + echo "✅ No vulnerabilities found in any image." 
+ shell: bash + + - name: Push Docker img + env: + IMAGE_TAG: ${{ env.IMAGE_TAG }} + run: | + if [ "${{ matrix.ubuntu_version }}" == "ubuntu24" ]; then + docker tag ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:${IMAGE_TAG}-${{ matrix.ubuntu_version }} ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:latest + docker push ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:latest + fi + docker push ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:${IMAGE_TAG}-${{ matrix.ubuntu_version }} + docker push ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:${IMAGE_TAG}-extended-${{ matrix.ubuntu_version }} + - name: Install Cosign + uses: sigstore/cosign-installer@398d4b0eeef1380460a10c8013a76f728fb906ac # v3.9.1 + + - name: Install skopeo + run: sudo apt update && sudo apt install -y skopeo jq + + - name: Get image digest + id: digest + env: + IMAGE_TAG: ${{ env.IMAGE_TAG }} + run: | + IMAGE=ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:${IMAGE_TAG}-${{ matrix.ubuntu_version }} + DIGEST=$(skopeo inspect docker://$IMAGE | jq -r '.Digest') + echo "digest=$DIGEST" >> $GITHUB_OUTPUT + IMAGE=ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:${IMAGE_TAG}-extended-${{ matrix.ubuntu_version }} + DIGEST=$(skopeo inspect docker://$IMAGE | jq -r '.Digest') + echo "digest_extended=$DIGEST" >> $GITHUB_OUTPUT + if [ "${{ matrix.ubuntu_version }}" == "ubuntu24" ]; then + DIGEST_LATEST=$(skopeo inspect docker://ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:latest | jq -r '.Digest') + echo "digest_latest=$DIGEST_LATEST" >> $GITHUB_OUTPUT + fi + + - name: Sign Docker image using Cosign (keyless) + env: + dlsps_img: ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server@${{ steps.digest.outputs.digest }} + dlsps_img_ext: ghcr.io/${{ github.repository 
}}/intel/edge-ai-dlstreamer-pipeline-server@${{ steps.digest.outputs.digest_extended }} + run: | + cosign sign --yes ${dlsps_img} + cosign sign --yes ${dlsps_img_ext} + + - name: Sign Docker image using Cosign (keyless) (latest) + if: ${{ matrix.ubuntu_version == 'ubuntu24' }} + env: + dlsps_img: ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server@${{ steps.digest.outputs.digest_latest }} + run: | + cosign sign --yes ${dlsps_img} + - name: Clean up + if: always() + env: + IMAGE_TAG: ${{ env.IMAGE_TAG }} + run: | + rm -rf edge-ai-libraries-repo + docker rmi ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:${IMAGE_TAG}-${{ matrix.ubuntu_version }} ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:latest || true + docker rmi ghcr.io/${{ github.repository }}/intel/edge-ai-dlstreamer-pipeline-server:${IMAGE_TAG}-extended-${{ matrix.ubuntu_version }} || true diff --git a/.github/workflows/trivy-config-mode.yaml b/.github/workflows/trivy-config-mode.yaml index 267bfb07..85db90f9 100644 --- a/.github/workflows/trivy-config-mode.yaml +++ b/.github/workflows/trivy-config-mode.yaml @@ -1,159 +1,159 @@ -name: "[SCANS] Trivy Dockerfile Config Scan" -run-name: "[SCANS] Trivy Dockerfile Config Scan (by @${{ github.actor }} via ${{ github.event_name }})" -on: - workflow_dispatch: - inputs: - dockerfile-path: - description: 'Path to the Dockerfile to scan' - required: true - type: string - trivy-report-format: - description: 'Trivy report format (e.g. json, table, sarif)' - required: false - default: 'json' - type: string - trivy-config-path: - description: 'Path to custom Trivy config file' - required: false - type: string - severity-levels: - description: 'Severity levels to check (comma-separated, e.g. LOW,MEDIUM,HIGH,CRITICAL)' - required: false - default: 'HIGH,CRITICAL' - type: string - output-report-path: - description: 'Path to save the Trivy output report (e.g. 
reports/trivy.json)' - required: true - type: string - name: - description: 'Additional part of scan name' - required: false - type: string - default: '1' - workflow_call: - inputs: - dockerfile-path: - description: 'Path to the Dockerfile to scan' - required: true - type: string - trivy-report-format: - description: 'Trivy report format (e.g. json, table, sarif)' - required: false - default: 'json' - type: string - trivy-config-path: - description: 'Path to custom Trivy config file' - required: false - type: string - severity-levels: - description: 'Severity levels to check (comma-separated, e.g. LOW,MEDIUM,HIGH,CRITICAL)' - required: false - default: 'HIGH,CRITICAL' - type: string - output-report-path: - description: 'Path to save the Trivy output report (e.g. reports/trivy.json)' - required: true - type: string - name: - description: 'Additional part of scan name' - required: false - type: string - default: '1' -permissions: {} - -jobs: - trivy-config-scan: - name: Trivy Dockerfile Config Scan - runs-on: ubuntu-latest - permissions: - contents: read - steps: - - name: Check out dlstreamer repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 - with: - persist-credentials: false - path: dlstreamer-repo - - - name: Install Trivy from Aqua Security APT repo - run: | - sudo apt-get update - sudo apt-get install -y gnupg lsb-release wget apt-transport-https curl jq - curl -fsSL https://aquasecurity.github.io/trivy-repo/deb/public.key | sudo gpg --dearmor -o /usr/share/keyrings/trivy.gpg - echo "deb [signed-by=/usr/share/keyrings/trivy.gpg] https://aquasecurity.github.io/trivy-repo/deb $(lsb_release -cs) main" | \ - sudo tee /etc/apt/sources.list.d/trivy.list > /dev/null - sudo apt-get update - sudo apt-get install -y trivy - - - name: Create report directory if needed - env: - output_report_path: ${{ inputs.output-report-path }} - run: | - mkdir -p "$(dirname "${output_report_path}")" - - - name: Run Trivy config scan and save output - 
env: - dockerfile_path: ${{ inputs.dockerfile-path }} - output_report_path: ${{ inputs.output-report-path }} - severity_levels: ${{ inputs.severity-levels }} - trivy_report_format: ${{ inputs.trivy-report-format }} - trivy_config_path: ${{ inputs.trivy-config-path }} - run: | - echo "🔍 Scanning: ${dockerfile_path}" - echo "📁 Saving report to: ${output_report_path}" - trivy_cmd="trivy config \ - --severity \"${severity_levels}\" \ - --format \"${trivy_report_format}\" \ - --output \"${output_report_path}\" \ - \"${dockerfile_path}\"" - - # Add Trivy config path if provided - if [ -n "$trivy_config_path" ]; then - trivy_cmd="$trivy_cmd --config \"$trivy_config_path\"" - fi - - eval $trivy_cmd - - echo "📄 Report preview:" - head -n 100 "${output_report_path}" - - - name: Check Trivy scan failures - env: - output_report_path: ${{ inputs.output-report-path }} - trivy_report_format: ${{ inputs.trivy-report-format }} - run: | - if [ "${trivy_report_format}" = "json" ]; then - echo "📊 Parsing JSON report..." - - # Check if file contains valid JSON - if ! jq empty "${output_report_path}" 2>/dev/null; then - echo "âš ī¸ Warning: Report is not valid JSON. Skipping failure count check." - exit 0 - fi - - # Extract failure count, handle null/missing fields - FAILURE_COUNT=$(jq '[.Results[]? | .MisconfSummary?.Failures? // 0] | add // 0' "${output_report_path}") - echo "Failures found: $FAILURE_COUNT" - - if [ "$FAILURE_COUNT" -gt 0 ]; then - echo "❌ Found $FAILURE_COUNT failures! Failing the job." - cat "${output_report_path}" - exit 1 - else - echo "✅ No failures found. Passing." - fi - else - echo "â„šī¸ Non-JSON format (${trivy_report_format}) detected. Skipping automated failure count." - echo "Please review the report artifact manually." 
- fi - - - name: Upload Trivy report as artifact - if: always () - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 #4.6.2 - with: - name: trivy-config-report-${{ inputs.name }} - path: ${{ inputs.output-report-path }} - - - name: Clean up - env: - output_report_path: ${{ inputs.output-report-path }} - run: rm -rf "${output_report_path}" +name: "[SCANS] Trivy Dockerfile Config Scan" +run-name: "[SCANS] Trivy Dockerfile Config Scan (by @${{ github.actor }} via ${{ github.event_name }})" +on: + workflow_dispatch: + inputs: + dockerfile-path: + description: 'Path to the Dockerfile to scan' + required: true + type: string + trivy-report-format: + description: 'Trivy report format (e.g. json, table, sarif)' + required: false + default: 'json' + type: string + trivy-config-path: + description: 'Path to custom Trivy config file' + required: false + type: string + severity-levels: + description: 'Severity levels to check (comma-separated, e.g. LOW,MEDIUM,HIGH,CRITICAL)' + required: false + default: 'HIGH,CRITICAL' + type: string + output-report-path: + description: 'Path to save the Trivy output report (e.g. reports/trivy.json)' + required: true + type: string + name: + description: 'Additional part of scan name' + required: false + type: string + default: '1' + workflow_call: + inputs: + dockerfile-path: + description: 'Path to the Dockerfile to scan' + required: true + type: string + trivy-report-format: + description: 'Trivy report format (e.g. json, table, sarif)' + required: false + default: 'json' + type: string + trivy-config-path: + description: 'Path to custom Trivy config file' + required: false + type: string + severity-levels: + description: 'Severity levels to check (comma-separated, e.g. LOW,MEDIUM,HIGH,CRITICAL)' + required: false + default: 'HIGH,CRITICAL' + type: string + output-report-path: + description: 'Path to save the Trivy output report (e.g. 
reports/trivy.json)' + required: true + type: string + name: + description: 'Additional part of scan name' + required: false + type: string + default: '1' +permissions: {} + +jobs: + trivy-config-scan: + name: Trivy Dockerfile Config Scan + runs-on: ubuntu-latest + permissions: + contents: read + steps: + - name: Check out dlstreamer repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #5.0.0 + with: + persist-credentials: false + path: dlstreamer-repo + + - name: Install Trivy from Aqua Security APT repo + run: | + sudo apt-get update + sudo apt-get install -y gnupg lsb-release wget apt-transport-https curl jq + curl -fsSL https://aquasecurity.github.io/trivy-repo/deb/public.key | sudo gpg --dearmor -o /usr/share/keyrings/trivy.gpg + echo "deb [signed-by=/usr/share/keyrings/trivy.gpg] https://aquasecurity.github.io/trivy-repo/deb $(lsb_release -cs) main" | \ + sudo tee /etc/apt/sources.list.d/trivy.list > /dev/null + sudo apt-get update + sudo apt-get install -y trivy + + - name: Create report directory if needed + env: + output_report_path: ${{ inputs.output-report-path }} + run: | + mkdir -p "$(dirname "${output_report_path}")" + + - name: Run Trivy config scan and save output + env: + dockerfile_path: ${{ inputs.dockerfile-path }} + output_report_path: ${{ inputs.output-report-path }} + severity_levels: ${{ inputs.severity-levels }} + trivy_report_format: ${{ inputs.trivy-report-format }} + trivy_config_path: ${{ inputs.trivy-config-path }} + run: | + echo "🔍 Scanning: ${dockerfile_path}" + echo "📁 Saving report to: ${output_report_path}" + trivy_cmd="trivy config \ + --severity \"${severity_levels}\" \ + --format \"${trivy_report_format}\" \ + --output \"${output_report_path}\" \ + \"${dockerfile_path}\"" + + # Add Trivy config path if provided + if [ -n "$trivy_config_path" ]; then + trivy_cmd="$trivy_cmd --config \"$trivy_config_path\"" + fi + + eval $trivy_cmd + + echo "📄 Report preview:" + head -n 100 "${output_report_path}" + + - 
name: Check Trivy scan failures + env: + output_report_path: ${{ inputs.output-report-path }} + trivy_report_format: ${{ inputs.trivy-report-format }} + run: | + if [ "${trivy_report_format}" = "json" ]; then + echo "📊 Parsing JSON report..." + + # Check if file contains valid JSON + if ! jq empty "${output_report_path}" 2>/dev/null; then + echo "âš ī¸ Warning: Report is not valid JSON. Skipping failure count check." + exit 0 + fi + + # Extract failure count, handle null/missing fields + FAILURE_COUNT=$(jq '[.Results[]? | .MisconfSummary?.Failures? // 0] | add // 0' "${output_report_path}") + echo "Failures found: $FAILURE_COUNT" + + if [ "$FAILURE_COUNT" -gt 0 ]; then + echo "❌ Found $FAILURE_COUNT failures! Failing the job." + cat "${output_report_path}" + exit 1 + else + echo "✅ No failures found. Passing." + fi + else + echo "â„šī¸ Non-JSON format (${trivy_report_format}) detected. Skipping automated failure count." + echo "Please review the report artifact manually." + fi + + - name: Upload Trivy report as artifact + if: always () + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 #4.6.2 + with: + name: trivy-config-report-${{ inputs.name }} + path: ${{ inputs.output-report-path }} + + - name: Clean up + env: + output_report_path: ${{ inputs.output-report-path }} + run: rm -rf "${output_report_path}" diff --git a/dependencies/opencv.cmake b/dependencies/opencv.cmake index f64d7c4f..c178d169 100644 --- a/dependencies/opencv.cmake +++ b/dependencies/opencv.cmake @@ -10,31 +10,34 @@ include(ExternalProject) set(DESIRED_VERSION 4.12.0) ExternalProject_Add( - opencv-contrib - PREFIX ${CMAKE_BINARY_DIR}/opencv-contrib + opencv_contrib + PREFIX ${CMAKE_BINARY_DIR}/opencv_contrib URL https://github.com/opencv/opencv_contrib/archive/${DESIRED_VERSION}.zip URL_MD5 2eecff53ebd74f6291108247d365cb61 + DOWNLOAD_EXTRACT_TIMESTAMP TRUE CONFIGURE_COMMAND "" BUILD_COMMAND "" INSTALL_COMMAND "" TEST_COMMAND "" ) 
-ExternalProject_Get_Property(opencv-contrib SOURCE_DIR) +ExternalProject_Get_Property(opencv_contrib SOURCE_DIR) ExternalProject_Add( opencv PREFIX ${CMAKE_BINARY_DIR}/opencv URL https://github.com/opencv/opencv/archive/${DESIRED_VERSION}.zip URL_MD5 6bc2ed099ff31451242f37a5f2dac0cf + DOWNLOAD_EXTRACT_TIMESTAMP TRUE CMAKE_GENERATOR Ninja TEST_COMMAND "" - CMAKE_ARGS -DBUILD_TESTS=OFF + INSTALL_COMMAND ninja install + CMAKE_ARGS -DBUILD_TESTS=OFF -DCMAKE_BUILD_TYPE=Release -DOPENCV_GENERATE_PKGCONFIG=ON -DBUILD_SHARED_LIBS=ON - -DBUILD_PERF_TESTS=OFF - -DBUILD_EXAMPLES=OFF - -DBUILD_opencv_apps=OFF + -DBUILD_PERF_TESTS=OFF + -DBUILD_EXAMPLES=OFF + -DBUILD_opencv_apps=OFF -DOPENCV_EXTRA_MODULES_PATH=${SOURCE_DIR}/modules -DCMAKE_INSTALL_PREFIX:PATH=${CMAKE_BINARY_DIR}/opencv-bin -DWITH_VA=ON diff --git a/dependencies/rdkafka.cmake b/dependencies/rdkafka.cmake index b1646a21..82061c76 100644 --- a/dependencies/rdkafka.cmake +++ b/dependencies/rdkafka.cmake @@ -14,11 +14,12 @@ ExternalProject_Add( PREFIX ${CMAKE_BINARY_DIR}/rdkafka URL https://github.com/edenhill/librdkafka/archive/v${DESIRED_VERSION}.tar.gz URL_MD5 86ed3acd2f9d9046250dea654cee59a8 + DOWNLOAD_EXTRACT_TIMESTAMP TRUE BUILD_IN_SOURCE 1 BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" - CONFIGURE_COMMAND ./configure + CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/rdkafka-bin ) diff --git a/docker/fedora41/fedora41.Dockerfile b/docker/fedora41/fedora41.Dockerfile index 9640f8ab..71537579 100644 --- a/docker/fedora41/fedora41.Dockerfile +++ b/docker/fedora41/fedora41.Dockerfile @@ -67,7 +67,7 @@ RUN \ python3-gobject-devel python3-devel tbb gnupg2 unzip gflags-devel openssl-devel openssl-devel-engine \ gobject-introspection-devel x265-devel x264-devel libde265-devel libgudev-devel libusb1 libusb1-devel nasm python3-virtualenv \ cairo-devel cairo-gobject-devel libXt-devel mesa-libGLES-devel wayland-protocols-devel libcurl-devel which \ - libssh2-devel cmake git valgrind numactl 
libvpx-devel opus-devel libsrtp-devel libXv-devel paho-c-devel \ + libssh2-devel cmake git valgrind numactl libvpx-devel opus-devel libsrtp-devel libXv-devel paho-c-devel ocl-icd-devel \ kernel-headers pmix pmix-devel hwloc hwloc-libs hwloc-devel libxcb-devel libX11-devel libatomic intel-media-driver libsoup3 && \ dnf clean all diff --git a/docs/source/dev_guide/advanced_install/advanced_install_guide_compilation.md b/docs/source/dev_guide/advanced_install/advanced_install_guide_compilation.md index 0f73c842..f6fd7ad7 100644 --- a/docs/source/dev_guide/advanced_install/advanced_install_guide_compilation.md +++ b/docs/source/dev_guide/advanced_install/advanced_install_guide_compilation.md @@ -30,7 +30,7 @@ Follow the instructions in libssh2-1-dev cmake git valgrind numactl libvpx-dev libopus-dev libsrtp2-dev libxv-dev \ linux-libc-dev libpmix2t64 libhwloc15 libhwloc-plugins libxcb1-dev libx11-xcb-dev \ ffmpeg libpaho-mqtt-dev libopencv-dev libpostproc-dev libavfilter-dev libavdevice-dev \ - libswscale-dev libswresample-dev libavutil-dev libavformat-dev libavcodec-dev libtbb12 libxml2-dev libopencv-dev \ + libswscale-dev libswresample-dev libavutil-dev libavformat-dev libavcodec-dev libtbb12 libxml2-dev \ ocl-icd-opencl-dev ``` @@ -158,7 +158,7 @@ there is need to install [OpenVINO GenAI archive](https://docs.openvino.ai/2025/ ```bash - curl -L https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2025.4/linux/openvino_genai_ubuntu24_2025.4.0.0_x86_64.tar.gz | tar -xz && + wget -O- https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2025.4/linux/openvino_genai_ubuntu24_2025.4.0.0_x86_64.tar.gz | tar -xz && sudo mv openvino_genai_ubuntu24_2025.4.0.0_x86_64 /opt/intel/openvino_genai source /opt/intel/openvino_genai/setupvars.sh ``` @@ -168,7 +168,7 @@ there is need to install [OpenVINO GenAI archive](https://docs.openvino.ai/2025/ ```bash -curl -L 
https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2025.4/linux/openvino_genai_ubuntu22_2025.4.0.0_x86_64.tar.gz | tar -xz && +wget -O- https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2025.4/linux/openvino_genai_ubuntu22_2025.4.0.0_x86_64.tar.gz | tar -xz && mv openvino_genai_ubuntu22_2025.4.0.0_x86_64 /opt/intel/openvino_genai source /opt/intel/openvino_genai/setupvars.sh ``` diff --git a/src/monolithic/gst/elements/CMakeLists.txt b/src/monolithic/gst/elements/CMakeLists.txt index b8c3d606..3f652a4e 100644 --- a/src/monolithic/gst/elements/CMakeLists.txt +++ b/src/monolithic/gst/elements/CMakeLists.txt @@ -6,7 +6,7 @@ set (TARGET_NAME "elements") -find_package(OpenCV REQUIRED core imgproc) +find_package(OpenCV REQUIRED core imgproc calib3d) find_package(PkgConfig REQUIRED) pkg_check_modules(GSTREAMER gstreamer-1.0>=1.16 REQUIRED) pkg_check_modules(GSTVIDEO gstreamer-video-1.0>=1.16 REQUIRED) diff --git a/src/monolithic/gst/elements/gvarealsense/CMakeLists.txt b/src/monolithic/gst/elements/gvarealsense/CMakeLists.txt index a81cf64f..56c9266b 100644 --- a/src/monolithic/gst/elements/gvarealsense/CMakeLists.txt +++ b/src/monolithic/gst/elements/gvarealsense/CMakeLists.txt @@ -7,7 +7,7 @@ set(TARGET_NAME "gvarealsense") pkg_search_module(VA va libva REQUIRED) -find_package(OpenCV REQUIRED core imgproc) +find_package(OpenCV REQUIRED core imgproc calib3d) find_package(realsense2) file(GLOB REALSENSE_SRC @@ -18,7 +18,7 @@ file(GLOB REALSENSE_HEADERS *.h ) -add_library(gvarealsense SHARED gvarealsense.cpp gvarealsense.h gvarealsense_utils.cpp gvarealsense_utils.h +add_library(gvarealsense SHARED gvarealsense.cpp gvarealsense.h gvarealsense_utils.cpp gvarealsense_utils.h gvarealsense_pcd.h gvarealsense_pcd.cpp ) set_compile_flags(${TARGET_NAME})