Changes from all commits
26 commits
23d988d
Updated CI to replace ENiC with VEN
sunil-parida Nov 22, 2025
cf880c0
Update virtual-integration.yml
sunil-parida Nov 22, 2025
3bc29b1
Update virtual-integration.yml
sunil-parida Nov 22, 2025
f20464b
Update virtual-integration.yml
sunil-parida Nov 22, 2025
245cee7
Update virtual-integration.yml
sunil-parida Nov 22, 2025
a73bb30
Update virtual-integration.yml
sunil-parida Nov 22, 2025
9b3719d
Update virtual-integration.yml
sunil-parida Nov 22, 2025
ed97eb8
Update virtual-integration.yml
sunil-parida Nov 23, 2025
889f2b2
Update virtual-integration.yml
sunil-parida Nov 23, 2025
005adfe
Update libvirt-setup.sh
sunil-parida Nov 23, 2025
1606b2b
Update virtual-integration.yml
sunil-parida Nov 23, 2025
c1054d9
Update virtual-integration.yml
sunil-parida Nov 24, 2025
4dd399d
Update virtual-integration.yml
sunil-parida Nov 24, 2025
9cd5873
Merge branch 'main' into ven-with-kind-v1
sunil-parida Nov 24, 2025
5e36d1a
Update virtual-integration.yml
sunil-parida Dec 2, 2025
199d0c8
Merge branch 'main' into ven-with-kind-v1
sunil-parida Dec 2, 2025
7ba9218
Revert "Update virtual-integration.yml"
sunil-parida Dec 2, 2025
f0b72fc
Merge branch 'main' into ven-with-kind-v1
sunil-parida Dec 2, 2025
f4619e8
Merge branch 'main' into ven-with-kind-v1
sunil-parida Dec 3, 2025
e3142e3
Merge branch 'main' into ven-with-kind-v1
sunil-parida Dec 11, 2025
5c47089
Merge branch 'main' into ven-with-kind-v1
sunil-parida Dec 13, 2025
42d7071
Merge branch 'main' into ven-with-kind-v1
sunil-parida Jan 12, 2026
184722d
updated git token variable
sunil-parida Jan 12, 2026
4db8162
Merge branch 'main' into ven-with-kind-v1
sunil-parida Jan 13, 2026
43c4964
Update virtual-integration.yml
sunil-parida Jan 13, 2026
3f672c2
Update virtual-integration.yml
sunil-parida Jan 13, 2026
196 changes: 65 additions & 131 deletions .github/workflows/virtual-integration.yml
@@ -486,6 +486,16 @@ jobs:
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- name: Install DNSmasq
run: |
cd ci/ven
./dnsmasq-setup.sh "kind.internal" setup
- name: Install Libvirt
env:
LIBVIRT_DEFAULT_URI: 'qemu:///system'
run: |
cd ci/ven
./libvirt-setup.sh
- name: Deploy Kind Orchestrator
id: deploy-kind-orchestrator
uses: ./.github/actions/deploy_kind
@@ -501,10 +511,15 @@ jobs:
if: always()
uses: ./.github/actions/collect_diagnostics
timeout-minutes: 15
- name: Config DNSmasq
run: |
cd ci/ven
./dnsmasq-setup.sh "kind.internal" config
- name: Run policy compliance tests
run: mage test:policyCompliance
- name: Run image pull policy compliance tests
run: mage test:imagePullPolicyCompliance

- name: Setup Sample Org and Project with default users
id: default-mt-setup
run: mage tenantUtils:createDefaultMtSetup
@@ -522,152 +537,71 @@
- name: Create default user and run e2e tests
run: mage devUtils:createDefaultUser test:e2e

- name: "Test Observability SRE Exporter w/o ENiC"
- name: "Test Observability SRE Exporter w/o VEN"
env:
ORCH_DEFAULT_PASSWORD: ${{ secrets.ORCH_DEFAULT_PASSWORD }}
run: |
mage test:e2eSreObservabilityNoEnic

- name: Deploy ENiC
if: ${{ always() && steps.deploy-kind-orchestrator.conclusion == 'success' && steps.default-mt-setup.conclusion == 'success' }}
- name: Read test-automation dependency version
id: read-test-automation-version
shell: bash
timeout-minutes: 5
env:
ORCH_ORG: sample-org
ORCH_PROJECT: sample-project
ORCH_USER: sample-project-onboarding-user
ORCH_USER_API: sample-project-api-user
EDGE_MANAGEABILITY_FRAMEWORK_REV: ${{ env.GIT_HASH }}
run: |
mage devUtils:deployEnic 1 dev

# wait until SN and UUID are available
mage devUtils:getEnicSerialNumber
mage devUtils:getEnicUUID

# then get them and store them in the env
UUID=$(kubectl exec -it -n enic enic-0 -c edge-node -- bash -c "dmidecode -s system-uuid")
SN=$(kubectl exec -it -n enic enic-0 -c edge-node -- bash -c "dmidecode -s system-serial-number")
echo "EN_UUID=$UUID" >> "$GITHUB_ENV"
echo "EN_SN=$SN" >> "$GITHUB_ENV"

- name: Print current EN UUID and SN
if: ${{ always() && steps.deploy-kind-orchestrator.conclusion == 'success' && steps.default-mt-setup.conclusion == 'success' }}
run: echo "Current ENiC has UUID ${{ env.EN_UUID }} and SN ${{ env.EN_SN }}"

- name: UI E2E Tests
if: ${{ always() && steps.deploy-kind-orchestrator.conclusion == 'success' && steps.default-mt-setup.conclusion == 'success' }}
uses: ./.github/actions/cypress
cat /proc/cpuinfo
version=$(yq '.test-automation.version' ${{ github.workspace }}/.test-dependencies.yaml | tr -d '\n' | xargs)
echo $version
echo "version=$version" >> $GITHUB_OUTPUT
- name: Checkout edge-manage-test-automation repository with submodules
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
repository: open-edge-platform/edge-manage-test-automation
ref: ${{ steps.read-test-automation-version.outputs.version }}
path: edge-manage-test-automation
submodules: 'recursive'
token: ${{ secrets.SYS_EMF_GH_TOKEN }}
en_serial_number: ${{ env.EN_SN }}
en_uuid: ${{ env.EN_UUID }}
infra: "cypress/e2e/infra/locations.cy.ts,cypress/e2e/infra/new-host-provision.cy.ts,cypress/e2e/infra/verify-host.cy.ts"

- name: "Test Observability Public Endpoints"
env:
ORCH_DEFAULT_PASSWORD: ${{ secrets.ORCH_DEFAULT_PASSWORD }}
run: |
mage test:e2eObservability

- name: "Test Observability Orchestrator Stack"
env:
ORCH_DEFAULT_PASSWORD: ${{ secrets.ORCH_DEFAULT_PASSWORD }}
run: |
mage test:e2eOrchObservability

# TODO - The base extension deployment including the observability stack has been removed. Need to revisit this test stack later and see how to proceed further.
# - name: "Test Observability EdgeNode Stack"
# env:
# ORCH_DEFAULT_PASSWORD: ${{ secrets.ORCH_DEFAULT_PASSWORD }}
# run: |
# mage test:e2eENObservability

# TODO - The base extension deployment including the observability stack has been removed. Need to revisit this test stack later and see how to proceed further.
# - name: "Test Observability Alerts"
# env:
# ORCH_DEFAULT_PASSWORD: ${{ secrets.ORCH_DEFAULT_PASSWORD }}
# run: |
# mage test:e2eAlertsObservability

# TODO - The base extension deployment including the observability stack has been removed. Need to revisit this test stack later and see how to proceed further.
# - name: "Test Observability SRE Exporter"
# env:
# ORCH_DEFAULT_PASSWORD: ${{ secrets.ORCH_DEFAULT_PASSWORD }}
# run: |
# mage test:e2eSreObservability

- name: deploy, register & provision new ENiC instance # previous instance was deauthorized by the cluster delete in UI tests
if: ${{ always() && steps.deploy-kind-orchestrator.conclusion == 'success' && steps.default-mt-setup.conclusion == 'success' }}
shell: bash
timeout-minutes: 5
env:
ORCH_ORG: sample-org
ORCH_PROJECT: sample-project
ORCH_USER: sample-project-onboarding-user
ORCH_USER_API: sample-project-api-user
EDGE_MANAGEABILITY_FRAMEWORK_REV: ${{ env.GIT_HASH }}
run: |
# delete the previous ENiC instance
kubectl delete ns enic

# deploy a new ENiC instance
mage devUtils:deployEnic 1 dev

# wait until SN and UUID are available
mage devUtils:getEnicSerialNumber
mage devUtils:getEnicUUID

# then get them and store them in the env
UUID=$(kubectl exec -it -n enic enic-0 -c edge-node -- bash -c "dmidecode -s system-uuid")
SN=$(kubectl exec -it -n enic enic-0 -c edge-node -- bash -c "dmidecode -s system-serial-number")
echo "EN_UUID=$UUID" >> "$GITHUB_ENV"
echo "EN_SN=$SN" >> "$GITHUB_ENV"

# register the ENiC in the orchestrator
ORCH_USER=${ORCH_USER_API} mage devUtils:registerEnic enic-0

# provision the ENiC
ORCH_USER=${ORCH_USER_API} mage devUtils:provisionEnic enic-0
sleep 5
mage devUtils:WaitForEnic

- name: Wait for cluster agent to be ready
if: ${{ always() && steps.deploy-kind-orchestrator.conclusion == 'success' }}
persist-credentials: false
- name: Setup virtual environment
working-directory: edge-manage-test-automation
run: |
mkdir -p smoke-test/logs
kubectl exec -n enic enic-0 -c edge-node -- bash -c "journalctl -x" > smoke-test/logs/enic-journalctl-pre.log
kubectl exec -n enic enic-0 -c edge-node -- bash -c "journalctl -xeu cluster-agent" > smoke-test/logs/enic-cluster-agent-pre.log
timeout 5m kubectl exec -n enic enic-0 -c edge-node -- bash -c "journalctl -f" | grep -m 1 "Cluster Agent state update"

- name: Run AO / CO smoke test
if: ${{ always() && steps.deploy-kind-orchestrator.conclusion == 'success' && steps.default-mt-setup.conclusion == 'success' }}
git submodule update --init --recursive
make asdf-install
make venv_edge-manage-test-automation
# install required versions for Pico
pushd repos/ven/pico
asdf install
sudo apt-get install xsltproc
popd
- name: Run Golden Suite Robot Framework Tests
id: robot-tests
working-directory: edge-manage-test-automation
timeout-minutes: 45
env:
PROJECT: sample-project
NODE_UUID: ${{ env.EN_UUID }}
EDGE_MGR_USER: sample-project-edge-mgr
EDGE_INFRA_USER: sample-project-api-user
run: |
echo "Running AO / CO smoke test..."
mage test:clusterOrchSmokeTest

- name: Collect smoke test logs
if: always()
REQUESTS_CA_BUNDLE: /usr/local/share/ca-certificates/orch-ca.crt
LIBVIRT_DEFAULT_URI: 'qemu:///system'
run: |
mkdir -p smoke-test/logs
kubectl logs -n orch-app -l app=app-deployment-api -c app-deployment-api --tail=-1 > smoke-test/logs/app-deployment-api.log
kubectl logs -n orch-app -l app=app-deployment-manager --tail=-1 > smoke-test/logs/app-deployment-manager.log
kubectl logs -n orch-app -l app=app-resource-manager -c app-resource-manager --tail=-1 > smoke-test/logs/app-resource-manager.log
kubectl logs -n orch-app -l app.kubernetes.io/name=app-orch-catalog --tail=-1 > smoke-test/logs/application-catalog.log
kubectl exec -n enic enic-0 -c edge-node -- bash -c "journalctl -xeu cluster-agent" > smoke-test/logs/enic-cluster-agent-post.log

- name: Upload smoke test logs
kubectl -n orch-platform get secrets platform-keycloak -o yaml || true
KC_ADMIN_PWD=$(kubectl -n orch-platform get secrets platform-keycloak -o jsonpath='{.data.admin-password}' | base64 -d)
# Add the password to the orchestrator config
yq eval ".orchestrator.admin_password = \"${KC_ADMIN_PWD}\"" -i orchestrator-configs/kind.yaml
yq eval '.infra.host.edgenode.hw_info.libvirt_pool_name = "default"' -i tests/core_foundation/data/cf_data_1_ven_VEN-libvirt_microvisor-nonrt.yaml
yq eval '.infra.host.edgenode.hw_info.libvirt_network_name = "default"' -i tests/core_foundation/data/cf_data_1_ven_VEN-libvirt_microvisor-nonrt.yaml
cat tests/core_foundation/data/cf_data_1_ven_VEN-libvirt_microvisor-nonrt.yaml || true
source venv_edge-manage-test-automation/bin/activate
robot -L DEBUG --pythonpath . \
--name "Golden Suite: Core Foundation" \
-d robot_output/core_foundation \
-V orchestrator-configs/kind.yaml \
--exitonfailure \
--exclude cf6 \
--exclude cf8 \
tests/core_foundation/core_foundation.robot
- name: Upload test artifacts
if: always()
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: smoke-test
name: kind-${{ github.event_name }}-${{ github.event.number }}-robot-report
path: |
smoke-test/logs/*
edge-manage-test-automation/robot_output/**/*

deploy-on-prem:
permissions:
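
The new "Read test-automation dependency Version" step pins the edge-manage-test-automation checkout to the version recorded in .test-dependencies.yaml. A minimal sketch of that pattern, assuming the file carries a test-automation.version key as the yq path in the step suggests:

    # .test-dependencies.yaml (assumed layout):
    #   test-automation:
    #     version: v1.2.3
    version=$(yq '.test-automation.version' .test-dependencies.yaml | tr -d '\n' | xargs)
    echo "version=$version" >> "$GITHUB_OUTPUT"  # later consumed as `ref` by actions/checkout

Pinning via a step output keeps the test repo at the revision the framework was validated against instead of tracking its main branch.
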
151 changes: 151 additions & 0 deletions ci/ven/dnsmasq-setup.sh
@@ -0,0 +1,151 @@
#!/bin/bash
# SPDX-FileCopyrightText: (C) 2025 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -x

# Check that both required arguments are provided
if [ -z "$1" ] || [ -z "$2" ]; then
echo "Usage: $0 <cluster.fqdn> {setup|config}"
exit 1
fi

CLUSTER_FQDN="$1"
ACTION="$2"

# Get the name of the first interface with a 10.x.x.x address
interface_name=$(ip -o -4 addr show | awk '$4 ~ /^10\./ {print $2}' | head -n1)

# Check if any interfaces were found
if [ -n "$interface_name" ]; then
echo "Interfaces with IP addresses starting with 10.:"
echo "$interface_name"
else
echo "No interfaces found with IP addresses starting with 10."
ip -o -4 addr show
exit 1
fi

# Get the IP address of the specified interface
ip_address=$(ip -4 addr show "$interface_name" | grep -oP '(?<=inet\s)\d+(\.\d+){3}')
if [ -z "$ip_address" ]; then
echo "No IP address found for $interface_name. Exiting."
exit 1
fi

function setup_dns() {
sudo apt update -y
resolvectl status
dns_server_ip=$(resolvectl status | awk '/Current DNS Server/ {print $4}')
sudo apt install -y dnsmasq
sudo systemctl disable systemd-resolved
sudo systemctl stop systemd-resolved

# Backup the original dnsmasq configuration file
echo "Backing up the original dnsmasq configuration..."
sudo cp /etc/dnsmasq.conf /etc/dnsmasq.conf.bak

# Get the current hostname
current_hostname=$(hostname)
echo "Adding hostname '$current_hostname' to /etc/hosts..."
echo "$ip_address $current_hostname" | sudo tee -a /etc/hosts > /dev/null

# Unlink and recreate /etc/resolv.conf
echo "Configuring /etc/resolv.conf..."
sudo unlink /etc/resolv.conf
cat <<EOL | sudo tee /etc/resolv.conf
nameserver 127.0.0.1
options trust-ad
EOL

# Configure dnsmasq
echo "Configuring dnsmasq..."
cat <<EOL | sudo tee /etc/dnsmasq.conf
interface=$interface_name
bind-interfaces
dhcp-option=interface:$interface_name,option:dns-server,$ip_address
server=$ip_address
server=$dns_server_ip
server=8.8.8.8
EOL
}

function update_host_lb_ip() {
# Get LoadBalancer IPs from Kubernetes services
argocd_lb=$(kubectl get svc -n argocd | grep LoadBalancer | awk '{print $4}')
tinkerbell_lb=$(kubectl get svc -n orch-boots | grep LoadBalancer | awk '{print $4}')
cluster_lb=$(kubectl get svc -n orch-gateway | grep LoadBalancer | awk '{print $4}')

# Check if LoadBalancer IPs were found
if [ -z "$argocd_lb" ] || [ -z "$tinkerbell_lb" ] || [ -z "$cluster_lb" ]; then
echo "One or more LoadBalancer IPs could not be retrieved. Exiting."
exit 1
fi

# Uncomment these lines if you want to use the host IP instead
# argocd_lb=$ip_address
# tinkerbell_lb=$ip_address
# cluster_lb=$ip_address

cat <<EOL | sudo tee /etc/dnsmasq.d/cluster-hosts-dns.conf
address=/tinkerbell-nginx.$CLUSTER_FQDN/$tinkerbell_lb
address=/argo.$CLUSTER_FQDN/$argocd_lb
address=/$CLUSTER_FQDN/$cluster_lb
address=/alerting-monitor.$CLUSTER_FQDN/$cluster_lb
address=/api.$CLUSTER_FQDN/$cluster_lb
address=/app-orch.$CLUSTER_FQDN/$cluster_lb
address=/app-service-proxy.$CLUSTER_FQDN/$cluster_lb
address=/cluster-orch-edge-node.$CLUSTER_FQDN/$cluster_lb
address=/cluster-orch-node.$CLUSTER_FQDN/$cluster_lb
address=/cluster-orch.$CLUSTER_FQDN/$cluster_lb
address=/connect-gateway.$CLUSTER_FQDN/$cluster_lb
address=/fleet.$CLUSTER_FQDN/$cluster_lb
address=/infra-node.$CLUSTER_FQDN/$cluster_lb
address=/infra.$CLUSTER_FQDN/$cluster_lb
address=/keycloak.$CLUSTER_FQDN/$cluster_lb
address=/license-node.$CLUSTER_FQDN/$cluster_lb
address=/log-query.$CLUSTER_FQDN/$cluster_lb
address=/logs-node.$CLUSTER_FQDN/$cluster_lb
address=/metadata.$CLUSTER_FQDN/$cluster_lb
address=/metrics-node.$CLUSTER_FQDN/$cluster_lb
address=/observability-admin.$CLUSTER_FQDN/$cluster_lb
address=/observability-ui.$CLUSTER_FQDN/$cluster_lb
address=/onboarding-node.$CLUSTER_FQDN/$cluster_lb
address=/onboarding-stream.$CLUSTER_FQDN/$cluster_lb
address=/onboarding.$CLUSTER_FQDN/$cluster_lb
address=/orchestrator-license.$CLUSTER_FQDN/$cluster_lb
address=/rancher.$CLUSTER_FQDN/$cluster_lb
address=/registry-oci.$CLUSTER_FQDN/$cluster_lb
address=/registry.$CLUSTER_FQDN/$cluster_lb
address=/release.$CLUSTER_FQDN/$cluster_lb
address=/telemetry-node.$CLUSTER_FQDN/$cluster_lb
address=/telemetry.$CLUSTER_FQDN/$cluster_lb
address=/tinkerbell-server.$CLUSTER_FQDN/$cluster_lb
address=/update-node.$CLUSTER_FQDN/$cluster_lb
address=/update.$CLUSTER_FQDN/$cluster_lb
address=/vault.$CLUSTER_FQDN/$cluster_lb
address=/vnc.$CLUSTER_FQDN/$cluster_lb
address=/web-ui.$CLUSTER_FQDN/$cluster_lb
address=/ws-app-service-proxy.$CLUSTER_FQDN/$cluster_lb
EOL
}

# Main execution logic
if [ "$ACTION" == "setup" ]; then
setup_dns
# update_host_lb_ip # Uncomment if you want to run this during setup
sudo systemctl restart dnsmasq
sudo systemctl enable dnsmasq
cat /etc/resolv.conf
cat /etc/dnsmasq.conf

elif [ "$ACTION" == "config" ]; then
update_host_lb_ip
sudo systemctl restart dnsmasq
sudo systemctl enable dnsmasq
echo "DNS config updated"
sudo cat /etc/dnsmasq.d/cluster-hosts-dns.conf
else
echo "Invalid action: $ACTION"
echo "Usage: $0 <cluster.fqdn> {setup|config}"
exit 1
fi
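
For reference, the workflow drives this script in two phases; a usage sketch (kind.internal is the FQDN the CI passes in, and the dig check is an optional extra that assumes dnsutils is installed on the runner):

    # Phase 1: before deploying the orchestrator, install dnsmasq and
    # point the runner's resolver at 127.0.0.1.
    ./dnsmasq-setup.sh "kind.internal" setup

    # Phase 2: once the Kind orchestrator is up and LoadBalancer IPs are
    # assigned, publish the per-service records under /etc/dnsmasq.d.
    ./dnsmasq-setup.sh "kind.internal" config

    # Sanity check: each orchestrator endpoint should now resolve to a
    # LoadBalancer IP.
    dig +short @127.0.0.1 web-ui.kind.internal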