diff --git a/.github/actions/aws_s3_helper/action.yml b/.github/actions/aws_s3_helper/action.yml
index 6ae81d7c5da1d..5c94170ea1138 100644
--- a/.github/actions/aws_s3_helper/action.yml
+++ b/.github/actions/aws_s3_helper/action.yml
@@ -1,90 +1,90 @@
-name: AWS S3 Helper
-description: Upload and download files from AWS S3
-
-inputs:
- s3_bucket:
- description: S3 Bucket Name
- required: true
- local_file:
- description: Local file paths
- required: false
- default: ../artifacts/file_list.txt
- download_file:
- description: Download file paths
- required: false
- default: ''
- mode:
- description: Mode of operation (upload/download)
- required: true
- default: single-upload
-
-outputs:
- presigned_url:
- description: Pre-signed URL for the uploaded file
- value: ${{ steps.sync-data.outputs.presigned_url }}
-
-runs:
- using: "composite"
- steps:
- - name: Sync Data
- id: sync-data
- shell: bash
- env:
- UPLOAD_LOCATION: ${{ github.repository_owner }}/${{ github.event.repository.name }}/${{ github.workflow }}/${{ github.head_ref != '' && github.head_ref || github.run_id }}/
- run: |
- echo "::group::$(printf '__________ %-100s' 'Process' | tr ' ' _)"
- case "${{ inputs.mode }}" in
- multi-upload)
- echo "Uploading files to S3 bucket..."
- first_line=true
- # Start the JSON object
- echo "{" > ${{ github.workspace }}/presigned_urls.json
- while IFS= read -r file; do
- if [ -f "$file" ]; then
- echo "Uploading $file..."
- aws s3 cp "$file" s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}
- echo "Uploaded $file to s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}"
- echo "Creating Pre-signed URL for $file..."
- filename=$(basename "$file")
- presigned_url=$(aws s3 presign s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}$filename --expires-in 3600)
- if [ "$first_line" = true ]; then
- first_line=false
- else
- echo "," >> ${{ github.workspace }}/presigned_urls.json
- fi
- # Append the pre-signed URL to the file
- echo " \"${file}\": \"${presigned_url}\"" >> ${{ github.workspace }}/presigned_urls.json
- echo "Pre-signed URL for $file: $presigned_url"
- else
- echo "Warning: $file does not exist or is not a regular file."
- fi
- done < "${{ inputs.local_file }}"
- # Close the JSON object
- echo "}" >> ${{ github.workspace }}/presigned_urls.json
- ;;
- single-upload)
- echo "Uploading single file to S3 bucket..."
- aws s3 cp "${{ inputs.local_file }}" s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}
- echo "Uploaded ${{ inputs.local_file }} to s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}"
- echo "Creating Pre-signed URL for ${{ inputs.local_file }}..."
- presigned_url=$(aws s3 presign s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}${{ inputs.local_file }} --expires-in 3600)
- echo "presigned_url=${presigned_url}" >> "$GITHUB_OUTPUT"
- ;;
- download)
- #Download The required file from s3
- echo "Downloading files from S3 bucket..."
- aws s3 sync s3://${{ inputs.s3_bucket }}/${{ inputs.download_file }} .
- ;;
- *)
- echo "Invalid mode. Use 'upload' or 'download'."
- exit 1
- ;;
- esac
-
- - name: Upload artifacts
- if: ${{ inputs.mode == 'multi-upload' }}
- uses: actions/upload-artifact@v4
- with:
- name: presigned_urls.json
- path: ${{ github.workspace }}/presigned_urls.json
+name: AWS S3 Helper
+description: Upload and download files from AWS S3
+
+inputs:
+ s3_bucket:
+ description: S3 Bucket Name
+ required: true
+ local_file:
+    description: Local file path (single-upload) or a file listing paths to upload (multi-upload)
+ required: false
+ default: ../artifacts/file_list.txt
+ download_file:
+    description: S3 prefix to download (download mode)
+ required: false
+ default: ''
+ mode:
+    description: Mode of operation (single-upload/multi-upload/download)
+ required: true
+ default: single-upload
+
+outputs:
+ presigned_url:
+ description: Pre-signed URL for the uploaded file
+ value: ${{ steps.sync-data.outputs.presigned_url }}
+
+runs:
+ using: "composite"
+ steps:
+ - name: Sync Data
+ id: sync-data
+ shell: bash
+ env:
+ UPLOAD_LOCATION: ${{ github.repository_owner }}/${{ github.event.repository.name }}/${{ github.workflow }}/${{ github.head_ref != '' && github.head_ref || github.run_id }}/
+ run: |
+ echo "::group::$(printf '__________ %-100s' 'Process' | tr ' ' _)"
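+        # Objects are keyed under UPLOAD_LOCATION:
+        #   <owner>/<repo>/<workflow>/<branch-or-run_id>/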
+ case "${{ inputs.mode }}" in
+ multi-upload)
+ echo "Uploading files to S3 bucket..."
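+            # inputs.local_file points at a list file naming one local path
+            # per line (e.g. modules.tar.xz, ../kobj/vmlinux); each entry is
+            # uploaded and presigned.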
+ first_line=true
+ # Start the JSON object
+            echo "{" > "${{ github.workspace }}/presigned_urls.json"
+ while IFS= read -r file; do
+ if [ -f "$file" ]; then
+ echo "Uploading $file..."
+ aws s3 cp "$file" s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}
+ echo "Uploaded $file to s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}"
+ echo "Creating Pre-signed URL for $file..."
+ filename=$(basename "$file")
+                presigned_url=$(aws s3 presign "s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}${filename}" --expires-in 3600)
+ if [ "$first_line" = true ]; then
+ first_line=false
+ else
+                  echo "," >> "${{ github.workspace }}/presigned_urls.json"
+ fi
+ # Append the pre-signed URL to the file
+                echo "  \"${file}\": \"${presigned_url}\"" >> "${{ github.workspace }}/presigned_urls.json"
+ echo "Pre-signed URL for $file: $presigned_url"
+ else
+ echo "Warning: $file does not exist or is not a regular file."
+ fi
+ done < "${{ inputs.local_file }}"
+ # Close the JSON object
+            echo "}" >> "${{ github.workspace }}/presigned_urls.json"
+ ;;
+ single-upload)
+ echo "Uploading single file to S3 bucket..."
+            aws s3 cp "${{ inputs.local_file }}" "s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}"
+            echo "Uploaded ${{ inputs.local_file }} to s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}"
+            echo "Creating Pre-signed URL for ${{ inputs.local_file }}..."
+            # Presign the uploaded object key (the file's basename under the
+            # upload prefix), not the local relative path.
+            filename=$(basename "${{ inputs.local_file }}")
+            presigned_url=$(aws s3 presign "s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}${filename}" --expires-in 3600)
+            echo "presigned_url=${presigned_url}" >> "$GITHUB_OUTPUT"
+ ;;
+ download)
+            # Download the requested files from S3
+            echo "Downloading files from S3 bucket..."
+            aws s3 sync "s3://${{ inputs.s3_bucket }}/${{ inputs.download_file }}" .
+ ;;
+ *)
+            echo "Invalid mode. Use 'single-upload', 'multi-upload' or 'download'."
+ exit 1
+ ;;
+ esac
+
+ - name: Upload artifacts
+ if: ${{ inputs.mode == 'multi-upload' }}
+ uses: actions/upload-artifact@v4
+ with:
+ name: presigned_urls.json
+ path: ${{ github.workspace }}/presigned_urls.json
retention-days: 1
\ No newline at end of file
diff --git a/.github/actions/build/action.yml b/.github/actions/build/action.yml
index 95c45bb32aa99..c998e4d418cf9 100644
--- a/.github/actions/build/action.yml
+++ b/.github/actions/build/action.yml
@@ -1,37 +1,37 @@
-name: Build workspace
-description: Build workspace
-
-inputs:
- docker_image:
- description: Docker image
- required: true
- default: kmake-image:latest
-
-runs:
- using: "composite"
- steps:
- - name: Download artifacts
- shell: bash
- run: |
- mkdir -p ../artifacts && \
- wget -O ../artifacts/ramdisk.gz https://snapshots.linaro.org/member-builds/qcomlt/testimages/arm64/1379/initramfs-test-image-qemuarm64-20230321073831-1379.rootfs.cpio.gz && \
- wget -O ../artifacts/systemd-boot-efi.deb http://ports.ubuntu.com/pool/universe/s/systemd/systemd-boot-efi_255.4-1ubuntu8_arm64.deb && \
- dpkg-deb -xv ../artifacts/systemd-boot-efi.deb ../artifacts/systemd
-
- - name: Make
- shell: bash
- run: |
- docker run -i --rm \
- --user $(id -u):$(id -g) \
- --workdir="$PWD" \
- -v "$(dirname $PWD)":"$(dirname $PWD)" \
- ${{ inputs.docker_image }} bash -c "
- make O=../kobj defconfig
- make O=../kobj -j$(nproc)
- make O=../kobj -j$(nproc) dir-pkg INSTALL_MOD_STRIP=1
- "
-
- - name: Package DLKM into ramdisk
- shell: bash
- run: |
+name: Build workspace
+description: Build workspace
+
+inputs:
+ docker_image:
+ description: Docker image
+ required: true
+ default: kmake-image:latest
+
+runs:
+ using: "composite"
+ steps:
+ - name: Download artifacts
+ shell: bash
+ run: |
+ mkdir -p ../artifacts && \
+ wget -O ../artifacts/ramdisk.gz https://snapshots.linaro.org/member-builds/qcomlt/testimages/arm64/1379/initramfs-test-image-qemuarm64-20230321073831-1379.rootfs.cpio.gz && \
+ wget -O ../artifacts/systemd-boot-efi.deb http://ports.ubuntu.com/pool/universe/s/systemd/systemd-boot-efi_255.4-1ubuntu8_arm64.deb && \
+ dpkg-deb -xv ../artifacts/systemd-boot-efi.deb ../artifacts/systemd
+
+ - name: Make
+ shell: bash
+ run: |
+ docker run -i --rm \
+ --user $(id -u):$(id -g) \
+ --workdir="$PWD" \
+ -v "$(dirname $PWD)":"$(dirname $PWD)" \
+ ${{ inputs.docker_image }} bash -c "
+ make O=../kobj defconfig
+ make O=../kobj -j$(nproc)
+ make O=../kobj -j$(nproc) dir-pkg INSTALL_MOD_STRIP=1
+ "
+
+ - name: Package DLKM into ramdisk
+ shell: bash
+ run: |
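+        # Appending works because the kernel unpacks concatenated (optionally
+        # gzipped) cpio archives in sequence when loading the initramfs.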
(cd ../kobj/tar-install ; find lib/modules | cpio -o -H newc -R +0:+0 | gzip -9 >> ../../artifacts/ramdisk.gz)
\ No newline at end of file
diff --git a/.github/actions/lava_job_render/action.yml b/.github/actions/lava_job_render/action.yml
index 186a19275e388..be9b2587685e4 100644
--- a/.github/actions/lava_job_render/action.yml
+++ b/.github/actions/lava_job_render/action.yml
@@ -1,154 +1,154 @@
-name: Test Action
-inputs:
- docker_image:
- description: Docker image
- required: true
- default: kmake-image:latest
-
-runs:
- using: "composite"
- steps:
- - name: Process presigned_urls.json
- id: process_urls
- uses: actions/github-script@v7
- with:
- script: |
- const fs = require('fs');
- const p = require('path');
- // Helper function to find URL by filename
- function findUrlByFilename(filename) {
- for (const [path, url] of Object.entries(data)) {
- if (path.endsWith(filename)) {
- return url;
- }
- }
- return null;
- }
- const filePath = p.join(process.env.GITHUB_WORKSPACE, 'presigned_urls.json');
- if (fs.existsSync(filePath)) {
- console.log("File exists");
- } else {
- console.log("File does not exist");
- core.setFailed(`File not found: ${filePath}`);
- }
- // Read the JSON file
- const data = JSON.parse(fs.readFileSync(filePath, 'utf-8'));
- // Extract URLs into variables
- const modulesTarUrl = findUrlByFilename('modules.tar.xz');
- const imageUrl = findUrlByFilename('Image');
- const vmlinuxUrl = findUrlByFilename('vmlinux');
- const dtbUrl = findUrlByFilename('qcs6490-rb3gen2.dtb');
- // Set outputs
- core.setOutput('modules_url', modulesTarUrl);
- core.setOutput('image_url', imageUrl);
- core.setOutput('vmlinux_url', vmlinuxUrl);
- core.setOutput('dtb_url', dtbUrl);
- console.log(`Modules URL: ${modulesTarUrl}`);
- console.log(`Image URL: ${imageUrl}`);
- console.log(`Vmlinux URL: ${vmlinuxUrl}`);
- console.log(`Dtb URL: ${dtbUrl}`);
-
- - name: Create metadata.json
- id: create_metadata
- shell: bash
- run: |
- echo "Creating job definition"
- # Create the job definition using the processed URLs
- cd ../job_render
- docker run -i --rm \
- --user "$(id -u):$(id -g)" \
- --workdir="$PWD" \
- -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
- -e dtb_url="${{ steps.process_urls.outputs.dtb_url }}" \
- ${{ inputs.docker_image }} \
- jq '.artifacts["dtbs/qcom/qcs6490-rb3gen2.dtb"] = env.dtb_url' data/metadata.json > temp.json && mv temp.json data/metadata.json
-
- - name: Upload metadata.json
- id: upload_metadata
- uses: ./.github/actions/aws_s3_helper
- with:
- local_file: ../job_render/data/metadata.json
- s3_bucket: qli-prd-kernel-gh-artifacts
- mode: single-upload
-
- - name: Create template json
- shell: bash
- run: |
- echo "Creating job definition"
- metadata_url="${{ steps.upload_metadata.outputs.presigned_url }}"
- vmlinux_url="${{ steps.process_urls.outputs.vmlinux_url }}"
- image_url="${{ steps.process_urls.outputs.image_url }}"
- modules_url="${{ steps.process_urls.outputs.modules_url }}"
- # Create the job definition using the processed URLs
- cd ../job_render
- # using metadata_url
- docker run -i --rm \
- --user "$(id -u):$(id -g)" \
- --workdir="$PWD" \
- -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
- -e metadata_url="$metadata_url" \
- ${{ inputs.docker_image }} \
- jq '.artifacts.metadata = env.metadata_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
- # using image_url
- docker run -i --rm \
- --user "$(id -u):$(id -g)" \
- --workdir="$PWD" \
- -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
- -e image_url="$image_url" \
- ${{ inputs.docker_image }} \
- jq '.artifacts.kernel = env.image_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
- # using vmlinux_url
- docker run -i --rm \
- --user "$(id -u):$(id -g)" \
- --workdir="$PWD" \
- -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
- -e vmlinux_url="$vmlinux_url" \
- ${{ inputs.docker_image }} \
- jq '.artifacts.vmlinux = env.vmlinux_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
- # using modules_url
- docker run -i --rm \
- --user "$(id -u):$(id -g)" \
- --workdir="$PWD" \
- -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
- -e modules_url="$modules_url" \
- ${{ inputs.docker_image }} \
- jq '.artifacts.modules = env.modules_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
-
- - name: Update firmware and ramdisk
- shell: bash
- run: |
- cd ../job_render
- ramdisk_url="$(aws s3 presign s3://qli-prd-kernel-gh-artifacts/meta-qcom/initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz --expires 7600)"
- firmware_url="$(aws s3 presign s3://qli-prd-kernel-gh-artifacts/meta-qcom/initramfs-firmware-rb3gen2-image-qcom-armv8a.cpio.gz --expires 7600)"
- # using ramdisk_url
- docker run -i --rm \
- --user "$(id -u):$(id -g)" \
- --workdir="$PWD" \
- -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
- -e ramdisk_url="$ramdisk_url" \
- ${{ inputs.docker_image }} \
- jq '.artifacts.ramdisk = env.ramdisk_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
-
- # using firmware_url
- docker run -i --rm \
- --user "$(id -u):$(id -g)" \
- --workdir="$PWD" \
- -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
- -e firmware_url="$firmware_url" \
- ${{ inputs.docker_image }} \
- jq '.artifacts.firmware = env.firmware_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
-
- - name: Create lava_job_definition
- shell: bash
- run: |
- cd ../job_render
- mkdir renders
- docker run -i --rm \
- --user "$(id -u):$(id -g)" \
- --workdir="$PWD" \
- -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
- ${{ inputs.docker_image }} \
- sh -c 'export BOOT_METHOD=fastboot && \
- export TARGET=qcs6490-rb3gen2 && \
- export TARGET_DTB=qcs6490-rb3gen2 && \
- python3 lava_Job_definition_generator.py --localjson ./data/cloudData.json'
\ No newline at end of file
+name: LAVA Job Render
+description: Render a LAVA job definition from build artifacts
+inputs:
+ docker_image:
+ description: Docker image
+ required: true
+ default: kmake-image:latest
+
+runs:
+ using: "composite"
+ steps:
+ - name: Process presigned_urls.json
+ id: process_urls
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const fs = require('fs');
+ const p = require('path');
+        // Helper: return the presigned URL whose key ends with the given
+        // filename ("data" is initialized below, before the first call).
+ function findUrlByFilename(filename) {
+ for (const [path, url] of Object.entries(data)) {
+ if (path.endsWith(filename)) {
+ return url;
+ }
+ }
+ return null;
+ }
+ const filePath = p.join(process.env.GITHUB_WORKSPACE, 'presigned_urls.json');
+        if (!fs.existsSync(filePath)) {
+          core.setFailed(`File not found: ${filePath}`);
+          return; // stop before JSON.parse throws on the missing file
+        }
+ // Read the JSON file
+ const data = JSON.parse(fs.readFileSync(filePath, 'utf-8'));
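+        // "data" maps each uploaded local path to its presigned URL, e.g.
+        // { "modules.tar.xz": "https://...", "../kobj/vmlinux": "https://..." }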
+ // Extract URLs into variables
+ const modulesTarUrl = findUrlByFilename('modules.tar.xz');
+ const imageUrl = findUrlByFilename('Image');
+ const vmlinuxUrl = findUrlByFilename('vmlinux');
+ const dtbUrl = findUrlByFilename('qcs6490-rb3gen2.dtb');
+ // Set outputs
+ core.setOutput('modules_url', modulesTarUrl);
+ core.setOutput('image_url', imageUrl);
+ core.setOutput('vmlinux_url', vmlinuxUrl);
+ core.setOutput('dtb_url', dtbUrl);
+ console.log(`Modules URL: ${modulesTarUrl}`);
+ console.log(`Image URL: ${imageUrl}`);
+ console.log(`Vmlinux URL: ${vmlinuxUrl}`);
+ console.log(`Dtb URL: ${dtbUrl}`);
+
+ - name: Create metadata.json
+ id: create_metadata
+ shell: bash
+ run: |
+        echo "Updating metadata.json"
+        # Inject the DTB presigned URL into data/metadata.json
+ cd ../job_render
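+        # The "> temp.json" redirect is evaluated by the host shell, so jq's
+        # stdout from inside the container lands in a local temp file before
+        # replacing data/metadata.json.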
+ docker run -i --rm \
+ --user "$(id -u):$(id -g)" \
+ --workdir="$PWD" \
+ -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+ -e dtb_url="${{ steps.process_urls.outputs.dtb_url }}" \
+ ${{ inputs.docker_image }} \
+ jq '.artifacts["dtbs/qcom/qcs6490-rb3gen2.dtb"] = env.dtb_url' data/metadata.json > temp.json && mv temp.json data/metadata.json
+
+ - name: Upload metadata.json
+ id: upload_metadata
+ uses: ./.github/actions/aws_s3_helper
+ with:
+ local_file: ../job_render/data/metadata.json
+ s3_bucket: qli-prd-kernel-gh-artifacts
+ mode: single-upload
+
+ - name: Create template json
+ shell: bash
+ run: |
+ echo "Creating job definition"
+ metadata_url="${{ steps.upload_metadata.outputs.presigned_url }}"
+ vmlinux_url="${{ steps.process_urls.outputs.vmlinux_url }}"
+ image_url="${{ steps.process_urls.outputs.image_url }}"
+ modules_url="${{ steps.process_urls.outputs.modules_url }}"
+ # Create the job definition using the processed URLs
+ cd ../job_render
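+        # Each docker/jq pass below feeds one presigned URL into the container
+        # as an environment variable and splices it into data/cloudData.json
+        # via jq's env object.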
+ # using metadata_url
+ docker run -i --rm \
+ --user "$(id -u):$(id -g)" \
+ --workdir="$PWD" \
+ -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+ -e metadata_url="$metadata_url" \
+ ${{ inputs.docker_image }} \
+ jq '.artifacts.metadata = env.metadata_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
+ # using image_url
+ docker run -i --rm \
+ --user "$(id -u):$(id -g)" \
+ --workdir="$PWD" \
+ -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+ -e image_url="$image_url" \
+ ${{ inputs.docker_image }} \
+ jq '.artifacts.kernel = env.image_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
+ # using vmlinux_url
+ docker run -i --rm \
+ --user "$(id -u):$(id -g)" \
+ --workdir="$PWD" \
+ -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+ -e vmlinux_url="$vmlinux_url" \
+ ${{ inputs.docker_image }} \
+ jq '.artifacts.vmlinux = env.vmlinux_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
+ # using modules_url
+ docker run -i --rm \
+ --user "$(id -u):$(id -g)" \
+ --workdir="$PWD" \
+ -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+ -e modules_url="$modules_url" \
+ ${{ inputs.docker_image }} \
+ jq '.artifacts.modules = env.modules_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
+
+ - name: Update firmware and ramdisk
+ shell: bash
+ run: |
+ cd ../job_render
+        ramdisk_url="$(aws s3 presign s3://qli-prd-kernel-gh-artifacts/meta-qcom/initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz --expires-in 7600)"
+        firmware_url="$(aws s3 presign s3://qli-prd-kernel-gh-artifacts/meta-qcom/initramfs-firmware-rb3gen2-image-qcom-armv8a.cpio.gz --expires-in 7600)"
+ # using ramdisk_url
+ docker run -i --rm \
+ --user "$(id -u):$(id -g)" \
+ --workdir="$PWD" \
+ -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+ -e ramdisk_url="$ramdisk_url" \
+ ${{ inputs.docker_image }} \
+ jq '.artifacts.ramdisk = env.ramdisk_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
+
+ # using firmware_url
+ docker run -i --rm \
+ --user "$(id -u):$(id -g)" \
+ --workdir="$PWD" \
+ -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+ -e firmware_url="$firmware_url" \
+ ${{ inputs.docker_image }} \
+ jq '.artifacts.firmware = env.firmware_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
+
+ - name: Create lava_job_definition
+ shell: bash
+ run: |
+ cd ../job_render
+        mkdir -p renders
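+        # The generator reads BOOT_METHOD, TARGET and TARGET_DTB from the
+        # environment and writes renders/lava_job_definition.yaml, which the
+        # test workflow submits to LAVA.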
+ docker run -i --rm \
+ --user "$(id -u):$(id -g)" \
+ --workdir="$PWD" \
+ -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+ ${{ inputs.docker_image }} \
+ sh -c 'export BOOT_METHOD=fastboot && \
+ export TARGET=qcs6490-rb3gen2 && \
+ export TARGET_DTB=qcs6490-rb3gen2 && \
+ python3 lava_Job_definition_generator.py --localjson ./data/cloudData.json'
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 0ff23fc40f4fc..ad565b476c3a1 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -1,75 +1,75 @@
-name: _build
-on:
- workflow_call:
- inputs:
- docker_image:
- description: Docker image
- type: string
- required: true
-
-jobs:
- build:
- runs-on:
- group: GHA-Kernel-SelfHosted-RG
- labels: [ self-hosted, kernel-prd-u2404-x64-large-od-ephem ]
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ github.ref }}
- fetch-depth: 0
-
- - name: Pull docker image
- uses: ./.github/actions/pull_docker_image
- with:
- image: ${{ inputs.docker_image }}
- github_token: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Build workspace
- id: build_workspace
- uses: ./.github/actions/build
- with:
- docker_image: ${{ inputs.docker_image }}
-
- - name: Create file list for artifacts upload
- run: |
- touch ../artifacts/file_list.txt
- tar -cJf modules.tar.xz ../kobj/tar-install/lib/modules/
- echo "modules.tar.xz" >> ../artifacts/file_list.txt
- echo "../kobj/arch/arm64/boot/Image" >> ../artifacts/file_list.txt
- echo "../kobj/vmlinux" >> ../artifacts/file_list.txt
- echo "../kobj/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dtb" >> ../artifacts/file_list.txt
-
- - name: Upload artifacts
- uses: ./.github/actions/aws_s3_helper
- with:
- s3_bucket: qli-prd-kernel-gh-artifacts
- aws_access_key_id: ${{ secrets.AWSKEYID }}
- aws_secret_access_key: ${{ secrets.AWSACCESSKEY }}
- local_file: ../artifacts/file_list.txt
- mode: multi-upload
-
- - name: Clean up
- run: |
- rm -rf ../artifacts
- rm -rf ../kobj
- rm -rf modules.tar.xz
-
- - name: Update summary
- if: success() || failure()
- shell: bash
- run: |
- if [ ${{ steps.build_workspace.outcome }} == 'success' ]; then
- echo "Build was successful"
- summary=":heavy_check_mark: Build Success"
- else
- echo "Build failed"
- summary=":x: Build Failed"
- fi
- SUMMARY='
- Build Summary
-
- '${summary}'
-
- '
- echo -e "$SUMMARY" >> $GITHUB_STEP_SUMMARY
\ No newline at end of file
+name: _build
+on:
+ workflow_call:
+ inputs:
+ docker_image:
+ description: Docker image
+ type: string
+ required: true
+
+jobs:
+ build:
+ runs-on:
+ group: GHA-Kernel-SelfHosted-RG
+ labels: [ self-hosted, kernel-prd-u2404-x64-large-od-ephem ]
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ github.ref }}
+ fetch-depth: 0
+
+ - name: Pull docker image
+ uses: ./.github/actions/pull_docker_image
+ with:
+ image: ${{ inputs.docker_image }}
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Build workspace
+ id: build_workspace
+ uses: ./.github/actions/build
+ with:
+ docker_image: ${{ inputs.docker_image }}
+
+ - name: Create file list for artifacts upload
+ run: |
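+        # These four artifacts match the filenames lava_job_render looks up
+        # (modules.tar.xz, Image, vmlinux, qcs6490-rb3gen2.dtb).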
+ touch ../artifacts/file_list.txt
+ tar -cJf modules.tar.xz ../kobj/tar-install/lib/modules/
+ echo "modules.tar.xz" >> ../artifacts/file_list.txt
+ echo "../kobj/arch/arm64/boot/Image" >> ../artifacts/file_list.txt
+ echo "../kobj/vmlinux" >> ../artifacts/file_list.txt
+ echo "../kobj/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dtb" >> ../artifacts/file_list.txt
+
+ - name: Upload artifacts
+ uses: ./.github/actions/aws_s3_helper
+ with:
+ s3_bucket: qli-prd-kernel-gh-artifacts
+ local_file: ../artifacts/file_list.txt
+ mode: multi-upload
+
+ - name: Clean up
+ run: |
+ rm -rf ../artifacts
+ rm -rf ../kobj
+ rm -rf modules.tar.xz
+
+ - name: Update summary
+ if: success() || failure()
+ shell: bash
+ run: |
+          if [ "${{ steps.build_workspace.outcome }}" == "success" ]; then
+ echo "Build was successful"
+ summary=":heavy_check_mark: Build Success"
+ else
+ echo "Build failed"
+ summary=":x: Build Failed"
+ fi
+          SUMMARY="Build Summary\n\n${summary}\n"
+          echo -e "$SUMMARY" >> "$GITHUB_STEP_SUMMARY"
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 384f4bf7a7ed7..669e35beb347f 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -1,98 +1,98 @@
-name: _test
-description: Run tests on LAVA
-
-on:
- workflow_call:
- inputs:
- docker_image:
- description: Docker image
- type: string
- required: true
- default: kmake-image:latest
-
-jobs:
- test:
- runs-on:
- group: GHA-Kernel-SelfHosted-RG
- labels: [ self-hosted, kernel-prd-u2404-x64-large-od-ephem ]
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ github.ref }}
- fetch-depth: 0
-
- - name: Pull docker image
- uses: ./.github/actions/pull_docker_image
- with:
- image: ${{ inputs.docker_image }}
- github_token: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Download URLs list
- uses: actions/download-artifact@v4
- with:
- name: presigned_urls.json
- path: ${{ github.workspace }}
-
- - name: Clone lava job render scripts
- run: cd .. && git clone https://github.com/qualcomm-linux/job_render
-
- - name: Create lava job definition
- uses: ./.github/actions/lava_job_render
- id: create_job_definition
- with:
- docker_image: ${{ inputs.docker_image }}
-
- - name: Submit lava job
- id: submit_job
- run: |
- cd ../job_render
- job_id=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs submit ./renders/lava_job_definition.yaml")
- job_url="https://lava-oss.qualcomm.com/scheduler/job/$job_id"
- echo "job_id=$job_id" >> $GITHUB_OUTPUT
- echo "job_url=$job_url" >> $GITHUB_OUTPUT
- echo "Lava Job: $job_url"
- echo "JOB_ID=$job_id" >> $GITHUB_ENV
-
- - name: Check lava job results
- id: check_job
- run: |
- STATE=""
- while [ "$STATE" != "Finished" ]; do
- state=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs show $JOB_ID" | grep state)
- STATE=$(echo "$state" | cut -d':' -f2 | sed 's/^ *//;s/ *$//')
- echo "Current status: $STATE"
- sleep 30
- done
- health=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs show $JOB_ID" | grep Health)
- HEALTH=$(echo "$health" | cut -d':' -f2 | sed 's/^ *//;s/ *$//')
- if [[ "$HEALTH" == "Complete" ]]; then
- echo "Lava job passed."
- summary=":heavy_check_mark: Lava job passed."
- echo "summary=$summary" >> $GITHUB_OUTPUT
- exit 0
- else
- echo "Lava job failed."
- summary=":x: Lava job failed."
- echo "summary=$summary" >> $GITHUB_OUTPUT
- exit 1
- fi
-
- - name: Update summary
- if: success() || failure()
- shell: bash
- run: |
- if [ "${{ steps.create_job_definition.conclusion }}" == 'failure' ]; then
- status=":x: Test job failed"
- else
- status="${{ steps.check_job.outputs.summary }}"
- job_url="${{ steps.submit_job.outputs.job_url }}"
- job_id="${{ steps.submit_job.outputs.job_id }}"
- fi
- SUMMARY='
- '${status}'
-
- JOB ID: '${job_id}'
-
- '
+name: _test
+# Run tests on LAVA (workflows do not support a top-level description key)
+
+on:
+ workflow_call:
+ inputs:
+ docker_image:
+ description: Docker image
+ type: string
+        required: false
+        default: kmake-image:latest
+
+jobs:
+ test:
+ runs-on:
+ group: GHA-Kernel-SelfHosted-RG
+ labels: [ self-hosted, kernel-prd-u2404-x64-large-od-ephem ]
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ github.ref }}
+ fetch-depth: 0
+
+ - name: Pull docker image
+ uses: ./.github/actions/pull_docker_image
+ with:
+ image: ${{ inputs.docker_image }}
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Download URLs list
+ uses: actions/download-artifact@v4
+ with:
+ name: presigned_urls.json
+ path: ${{ github.workspace }}
+
+ - name: Clone lava job render scripts
+ run: cd .. && git clone https://github.com/qualcomm-linux/job_render
+
+ - name: Create lava job definition
+ uses: ./.github/actions/lava_job_render
+ id: create_job_definition
+ with:
+ docker_image: ${{ inputs.docker_image }}
+
+ - name: Submit lava job
+ id: submit_job
+ run: |
+ cd ../job_render
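+        # lavacli prints the submitted job id on stdout; capture it to build
+        # the job URL and to poll status later.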
+        job_id=$(docker run -i --rm --workdir="$PWD" -v "$(dirname "$PWD")":"$(dirname "$PWD")" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs submit ./renders/lava_job_definition.yaml")
+ job_url="https://lava-oss.qualcomm.com/scheduler/job/$job_id"
+ echo "job_id=$job_id" >> $GITHUB_OUTPUT
+ echo "job_url=$job_url" >> $GITHUB_OUTPUT
+ echo "Lava Job: $job_url"
+ echo "JOB_ID=$job_id" >> $GITHUB_ENV
+
+ - name: Check lava job results
+ id: check_job
+ run: |
+ STATE=""
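+        # Poll the job every 30 seconds until it reaches the Finished state,
+        # then read its Health to decide pass/fail.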
+ while [ "$STATE" != "Finished" ]; do
+          state=$(docker run -i --rm --workdir="$PWD" -v "$(dirname "$PWD")":"$(dirname "$PWD")" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs show $JOB_ID" | grep state)
+ STATE=$(echo "$state" | cut -d':' -f2 | sed 's/^ *//;s/ *$//')
+ echo "Current status: $STATE"
+ sleep 30
+ done
+        health=$(docker run -i --rm --workdir="$PWD" -v "$(dirname "$PWD")":"$(dirname "$PWD")" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs show $JOB_ID" | grep Health)
+ HEALTH=$(echo "$health" | cut -d':' -f2 | sed 's/^ *//;s/ *$//')
+ if [[ "$HEALTH" == "Complete" ]]; then
+ echo "Lava job passed."
+ summary=":heavy_check_mark: Lava job passed."
+ echo "summary=$summary" >> $GITHUB_OUTPUT
+ exit 0
+ else
+ echo "Lava job failed."
+ summary=":x: Lava job failed."
+ echo "summary=$summary" >> $GITHUB_OUTPUT
+ exit 1
+ fi
+
+ - name: Update summary
+ if: success() || failure()
+ shell: bash
+ run: |
+ if [ "${{ steps.create_job_definition.conclusion }}" == 'failure' ]; then
+ status=":x: Test job failed"
+ else
+ status="${{ steps.check_job.outputs.summary }}"
+ job_url="${{ steps.submit_job.outputs.job_url }}"
+ job_id="${{ steps.submit_job.outputs.job_id }}"
+ fi
+        SUMMARY="${status}\n\nJOB ID: ${job_id}\n"
echo -e "$SUMMARY" >> $GITHUB_STEP_SUMMARY
\ No newline at end of file