Skip to content

Commit

Permalink
testing: lints partial fixes
Browse files Browse the repository at this point in the history
This commit resolves violations of the following lint rules:

- 'key-order[task]'
- 'name[missing]'
- 'yaml[trailing-spaces]'
- 'risky-shell-pipe'
- 'args[module]'

Also removes the branch constraint so that the Ansible
lint checks run from any fork. This is useful because
the checks will be executed in each contributor's fork
when specific named branches are pushed. A future
improvement could be to run all the lint checks from a
single environment like `tox -e linters` instead of
consuming the GH action.

Partially-solves: openshift-psap#10
  • Loading branch information
ccamacho committed Aug 25, 2023
1 parent 690108f commit 5c1bf59
Show file tree
Hide file tree
Showing 36 changed files with 280 additions and 178 deletions.
3 changes: 0 additions & 3 deletions .github/workflows/ansible-lint.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,11 +3,8 @@ name: Run ansible-lint

# Controls when the action will run.
on:
# Triggers the workflow on push or pull request events but only for the main branch
pull_request:
branches: [main]
push:
branches: [main]
schedule:
- cron: '0 */8 * * *'
# Allows you to run this workflow manually from the Actions tab
Expand Down
7 changes: 0 additions & 7 deletions config/ansible-lint.yml
Original file line number Diff line number Diff line change
Expand Up @@ -14,11 +14,8 @@ skip_list:
- 'command-instead-of-module'
- 'command-instead-of-shell'
- 'deprecated-local-action'
- 'key-order[task]'
- 'jinja[spacing]'
- 'no-free-form'
- 'schema[meta]'
- 'name[missing]'
- 'var-naming[no-reserved]'
- 'var-naming[no-role-prefix]'
- 'var-naming[pattern]'
Expand All @@ -29,15 +26,11 @@ skip_list:
- 'yaml[indentation]'
- 'yaml[key-duplicates]'
- 'yaml[line-length]'
- 'yaml[new-line-at-end-of-file]'
- 'yaml[octal-values]'
- 'yaml[trailing-spaces]'
- 'yaml[truthy]'
- 'name[template]'
- 'name[casing]'
- 'risky-file-permissions'
- 'risky-shell-pipe'
- 'ignore-errors'
- 'no-changed-when'
- 'fqcn'
- 'args[module]'
37 changes: 19 additions & 18 deletions roles/benchmarking/benchmarking_run_mlperf_ssd/tasks/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -7,24 +7,24 @@
command: oc get nodes -l kubernetes.io/hostname={{ benchmarking_run_mlperf_ssd_node_hostname }} -oname

- name: Ensure that the coco dataset PVC exists
command:
oc get pvc/{{ benchmarking_run_mlperf_ssd_dataset_pvc_name }}
shell: |
oc get pvc/{{ benchmarking_run_mlperf_ssd_dataset_pvc_name }} \
-n {{ benchmarking_run_mlperf_ssd_namespace }}
- name: Fetch the coco dataset PVC definition (debug)
shell:
oc get pvc/{{ benchmarking_run_mlperf_ssd_pvc_name }}
-n {{ benchmarking_run_mlperf_ssd_namespace }}
-oyaml
shell: |
oc get pvc/{{ benchmarking_run_mlperf_ssd_pvc_name }} \
-n {{ benchmarking_run_mlperf_ssd_namespace }} \
-oyaml \
> {{ artifact_extra_logs_dir }}/pvc_coco-dataset.yml
- name: Create the entrypoint ConfigMap file
shell:
oc create cm {{ benchmarking_run_mlperf_ssd_entrypoint_cm_name }}
--from-file="{{ benchmarking_mlperf_ssd_entrypoint }}"
-n {{ benchmarking_run_mlperf_ssd_namespace }}
--dry-run=client
-oyaml
shell: |
oc create cm {{ benchmarking_run_mlperf_ssd_entrypoint_cm_name }} \
--from-file="{{ benchmarking_mlperf_ssd_entrypoint }}" \
-n {{ benchmarking_run_mlperf_ssd_namespace }} \
--dry-run=client \
-oyaml \
> {{ artifact_extra_logs_dir }}/000_configmap_run-mlperf-ssd_entrypoint.yml
- name: Create the entrypoint ConfigMap resource
Expand All @@ -45,7 +45,8 @@
command:
oc create -f "{{ artifact_extra_logs_dir }}/001_pod_run-mlperf-ssd.yml"

- block:
- name: Make sure the benchmark completes
block:
- name: Wait for the benchmark completion
command:
oc get pod/{{ benchmarking_run_mlperf_ssd_name }}
Expand All @@ -63,8 +64,8 @@

always:
- name: Store the logs of benchmark execution (for post-processing)
shell:
oc logs pod/{{ benchmarking_run_mlperf_ssd_name }} -n {{ benchmarking_run_mlperf_ssd_namespace }}
shell: |
oc logs pod/{{ benchmarking_run_mlperf_ssd_name }} -n {{ benchmarking_run_mlperf_ssd_namespace }} \
> "{{ artifact_extra_logs_dir }}/pod_run-mlperf-ssd.log"
failed_when: false

Expand All @@ -73,13 +74,13 @@
echo "{{ wait_benchmark_pod_cmd.stdout }}" > "{{ artifact_extra_logs_dir }}/pod_run-mlperf-ssd.status"

- name: Store the description of benchmark execution (debug)
shell:
oc describe pod/{{ benchmarking_run_mlperf_ssd_name }} -n {{ benchmarking_run_mlperf_ssd_namespace }}
shell: |
oc describe pod/{{ benchmarking_run_mlperf_ssd_name }} -n {{ benchmarking_run_mlperf_ssd_namespace }} \
> "{{ artifact_extra_logs_dir }}/pod_run-mlperf-ssd.descr"
failed_when: false

- name: Get average sample rate
shell:
shell: |
set -o pipefail;
cat "{{ artifact_extra_logs_dir }}/pod_run-mlperf-ssd.log" | grep avg. | tail -n1 | awk '{ print $NF " samples/sec" }' > "{{ artifact_dir }}/benchmarking_run_ssd_sample_rate.log";
cp {{ artifact_dir }}/benchmarking_run_ssd_sample_rate.log {{ artifact_extra_logs_dir }}/benchmarking_run_ssd_sample_rate.log
Expand Down
30 changes: 13 additions & 17 deletions roles/cluster/cluster_capture_environment/tasks/main.yml
Original file line number Diff line number Diff line change
@@ -1,34 +1,34 @@
- name: Store OpenShift version identifier
shell:
shell: |
set -o pipefail;
oc version -o json
| jq --raw-output '.openshiftVersion'
oc version -o json \
| jq --raw-output '.openshiftVersion' \
> {{ artifact_extra_logs_dir }}/ocp.version
- name: Store OpenShift YAML version
shell:
oc version -oyaml
shell: |
oc version -oyaml \
> {{ artifact_extra_logs_dir }}/ocp_version.yml
- name: Store OpenShift YAML clusterversion
shell:
oc get clusterversion/version -oyaml
shell: |
oc get clusterversion/version -oyaml \
> {{ artifact_extra_logs_dir }}/ocp_clusterversion.yml
# ---

- name: Store the OpenShift nodes
shell:
oc get nodes -owide
shell: |
oc get nodes -owide \
> {{ artifact_extra_logs_dir }}/nodes.status;
oc get nodes -oyaml
oc get nodes -oyaml \
> {{ artifact_extra_logs_dir }}/nodes.yaml;
- name: Store the OpenShift machines
shell:
oc get machines -n openshift-machine-api -owide
shell: |
oc get machines -n openshift-machine-api -owide \
> {{ artifact_extra_logs_dir }}/machines.status;
oc get machines -n openshift-machine-api -oyaml
oc get machines -n openshift-machine-api -oyaml \
> {{ artifact_extra_logs_dir }}/machines.yaml;
# ---
Expand All @@ -37,8 +37,6 @@
command:
git describe HEAD --long --always
register: git_version
args:
warn: false # don't warn about using git here

- name: Store ci-artifact version from Git
copy:
Expand All @@ -50,8 +48,6 @@
command:
git show --no-patch
register: git_show
args:
warn: false # don't warn about using git here

- name: Store ci-artifact last git commit
copy:
Expand Down
10 changes: 5 additions & 5 deletions roles/cluster/cluster_create_osd/tasks/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -117,11 +117,11 @@
oc get nodes > "{{ artifact_extra_logs_dir }}/nodes.status"

- name: Set the desired worker node count

Check warning on line 119 in roles/cluster/cluster_create_osd/tasks/main.yml

View workflow job for this annotation

GitHub Actions / build

jinja[spacing]

Jinja2 spacing could be improved: ocm edit machinepool {{ cluster_create_osd_machinepool_name }} --cluster={{ cluster_create_osd_cluster_name }} --replicas={{ [2,cluster_create_osd_compute_nodes|int]|max }}
command:
ocm edit machinepool
{{ cluster_create_osd_machinepool_name }}
--cluster={{ cluster_create_osd_cluster_name }}
--replicas={{ [2, cluster_create_osd_compute_nodes|int] |max }}
shell: |
ocm edit machinepool \
{{ cluster_create_osd_machinepool_name }} \
--cluster={{ cluster_create_osd_cluster_name }} \
--replicas={{ [2,cluster_create_osd_compute_nodes|int]|max }}
- name: Wait for the desired worker node count
shell: |
Expand Down
8 changes: 4 additions & 4 deletions roles/cluster/cluster_deploy_aws_efs/tasks/aws-efs.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -96,11 +96,11 @@

- name: Populate the tags dict
set_fact:
tags: "{{ tags | default({}) | combine ({ item.key : item.value }) }}"
tags: "{{ tags | default({}) | combine({item.key: item.value}) }}"
with_items:
- { 'key': 'Name' , 'value': '{{ cluster_name_cmd.stdout }}'}
- { 'key': '{{ cluster_name_tag_cmd.stdout }}' , 'value': 'owned'}
- { 'key': 'Purpose', 'value': ''}
- {'key': 'Name' , 'value': '{{ cluster_name_cmd.stdout }}'}
- {'key': '{{ cluster_name_tag_cmd.stdout }}' , 'value': 'owned'}
- {'key': 'Purpose', 'value': ''}

- name: Get the SecurityGroup content
amazon.aws.ec2_group_info:
Expand Down
7 changes: 6 additions & 1 deletion roles/cluster/cluster_deploy_ldap/tasks/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -144,9 +144,12 @@
# Workaround until `ocm` supports the --insecure flag

- name: Get the cluster ID
shell:
shell: |
set -o pipefail
ocm describe cluster "{{ cluster_deploy_ldap_cluster_name }}" --json | jq .id -r
register: cluster_id_cmd
args:
executable: /bin/bash

- name: Create the IDP resource manually
shell: |
Expand All @@ -157,6 +160,8 @@
url="https://api.openshift.com/api/clusters_mgmt/v1/clusters/{{ cluster_id_cmd.stdout }}/identity_providers";
cat "{{ cluster_deploy_ldap_ocm_idp }}" | envsubst > /tmp/idp.json
ocm post "$url" --body /tmp/idp.json
args:
executable: /bin/bash

- name: Get the API URL
command: oc whoami --show-server
Expand Down
68 changes: 34 additions & 34 deletions roles/cluster/cluster_deploy_operator/tasks/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -45,10 +45,10 @@
-n {{ cluster_deploy_operator_catalog_namespace }}

- name: Capture the state of the CatalogSource (debug)
shell:
oc get -oyaml CatalogSource/{{ cluster_deploy_operator_catalog }}
-n {{ cluster_deploy_operator_catalog_namespace }}
-oyaml
shell: |
oc get -oyaml CatalogSource/{{ cluster_deploy_operator_catalog }} \
-n {{ cluster_deploy_operator_catalog_namespace }} \
-oyaml \
> {{ artifact_extra_logs_dir }}/catalogsource.yml
failed_when: false

Expand All @@ -61,33 +61,33 @@
delay: 30

- name: Save the operator PackageManifest YAML (debug)
shell:
oc get packagemanifests/{{ cluster_deploy_operator_manifest_name }}
-n {{ cluster_deploy_operator_catalog_namespace }}
-oyaml
shell: |
oc get packagemanifests/{{ cluster_deploy_operator_manifest_name }} \
-n {{ cluster_deploy_operator_catalog_namespace }} \
-oyaml \
> {{ artifact_extra_logs_dir }}/operator_packagemanifest.yml
- name: Store the operator PackageManifest JSON
shell:
oc get packagemanifests/{{ cluster_deploy_operator_manifest_name }}
-n {{ cluster_deploy_operator_catalog_namespace }}
-ojson
shell: |
oc get packagemanifests/{{ cluster_deploy_operator_manifest_name }} \
-n {{ cluster_deploy_operator_catalog_namespace }} \
-ojson \
> {{ artifact_extra_logs_dir }}/operator_packagemanifest.json
rescue:
- name: Capture the Catalog Operator logs (debug)
shell:
oc logs deployment.apps/catalog-operator
-n openshift-operator-lifecycle-manager
shell: |
oc logs deployment.apps/catalog-operator \
-n openshift-operator-lifecycle-manager \
> {{ artifact_extra_logs_dir }}/catalog_operator.log
failed_when: false

- name: Indicate where the Catalog-operator logs have been saved
debug: msg="The logs of Catalog Operator have been saved in {{ artifact_extra_logs_dir }}/catalog_operator.log"

- name: Mark the failure as flake
shell:
echo "Failed because the {{ cluster_deploy_operator_manifest_name }} PackageManifest is not available"
shell: |
echo "Failed because the {{ cluster_deploy_operator_manifest_name }} PackageManifest is not available" \
> "{{ artifact_extra_logs_dir }}/FLAKE"
- name: Failed because the operator could not be found in the CatalogSource
Expand Down Expand Up @@ -231,7 +231,8 @@
- name: Instantiate the Subscription
command: oc apply -f "{{ artifact_extra_logs_dir }}/src/002_sub.yml"

- block:
- name: Make sure the InstallPlan is deployed
block:
- name: Find the operator InstallPlan
command:
oc get InstallPlan
Expand Down Expand Up @@ -276,33 +277,32 @@
fail: msg="ClusterServiceVersion install not successful ({{ operator_csv_phase.stdout }})"
when: operator_csv_phase.stdout != "Succeeded"

always:
- name: Store the YAML of the operator CSV that was installed (debug)
shell:
oc get ClusterServiceVersion/{{ operator_csv_name }}
-oyaml
-n "{{ cluster_deploy_operator_namespace }}"
> {{ artifact_extra_logs_dir }}/operator_csv.yml
- name: Store the YAML of the subscription
shell:
oc get -f "{{ artifact_extra_logs_dir }}/src/002_sub.yml"
-oyaml
-n "{{ cluster_deploy_operator_namespace }}"
> {{ artifact_extra_logs_dir }}/operator_sub.yml
rescue:
- name: Capture the Catalog Operator logs (debug)
shell:
shell: |
oc logs deployment.apps/catalog-operator
-n openshift-operator-lifecycle-manager
> {{ artifact_extra_logs_dir }}/catalog_operator.log
failed_when: false

- name: Indicate where the Catalog-operator logs have been saved
debug: msg="The logs of Catalog Operator have been saved in {{ artifact_extra_logs_dir }}/catalog_operator.log"

- name: Failed because the operator could not be installed from the CatalogSource
fail: msg="Failed because the operator could not be installed from the CatalogSource"

always:
- name: Store the YAML of the operator CSV that was installed (debug)
shell: |
oc get ClusterServiceVersion/{{ operator_csv_name }} \
-oyaml \
-n "{{ cluster_deploy_operator_namespace }}" \
> {{ artifact_extra_logs_dir }}/operator_csv.yml
- name: Store the YAML of the subscription
shell: |
oc get -f "{{ artifact_extra_logs_dir }}/src/002_sub.yml" \
-oyaml \
-n "{{ cluster_deploy_operator_namespace }}" \
> {{ artifact_extra_logs_dir }}/operator_sub.yml
- name: Deploy the operator CustomResource from its ClusterServiceVersion
include_tasks: deploy_cr.yml
when: cluster_deploy_operator_deploy_cr | bool
5 changes: 4 additions & 1 deletion roles/cluster/cluster_ensure_machineset/tasks/main.yml
Original file line number Diff line number Diff line change
@@ -1,13 +1,16 @@
---
- name: "Check if the cluster already has a {{ machineset_instance_type }} machineset"
shell:
shell: |
set -o pipefail
oc get machineset -n openshift-machine-api
{% if machineset_name | length > 0 %}
-ojson | jq '.items[] | select(.spec.template.spec.providerSpec.value.instanceType=="{{ machineset_instance_type }}" and .metadata.name=="{{ machineset_name }}") | .metadata.name' -r
{% else %}
-o=jsonpath='{.items[?(@.spec.template.spec.providerSpec.value.instanceType=="{{ machineset_instance_type }}")].metadata.name}'
{% endif %}
register: cluster_has_machineset
args:
executable: /bin/bash

- name: Delete the machineset if it is set but has the wrong instance type
when: not cluster_has_machineset.stdout and machineset_name | length > 0
Expand Down
Loading

0 comments on commit 5c1bf59

Please sign in to comment.