Migrate VPN and rp service to Docker Compose #3882

Merged: 9 commits, Oct 10, 2024
30 changes: 14 additions & 16 deletions .pipelines/ci.yml
@@ -11,22 +11,20 @@ trigger:
include:
- v2*

# PR triggers are overridden in the ADO UI

resources:
containers:
- container: golang
image: mcr.microsoft.com/onebranch/cbl-mariner/build:2.0
options: --user=0
- container: python
image: registry.access.redhat.com/ubi8/python-39:latest
options: --user=0
- container: ubi8
image: registry.access.redhat.com/ubi8/toolbox:8.8
options: --user=0 --privileged -v /dev/shm:/dev/shm --device /dev/net/tun --name vpn

variables:
- template: vars.yml
- name: REGISTRY
value: registry.access.redhat.com
- name: LOCAL_ARO_RP_IMAGE
value: "arosvcdev.azurecr.io/aro"
- name: LOCAL_ARO_AZEXT_IMAGE
value: "arosvcdev.azurecr.io/azext-aro"
- name: LOCAL_VPN_IMAGE
value: "arosvcdev.azurecr.io/vpn"
- name: TAG
value: $(Build.BuildId)
- name: VERSION
value: $(Build.BuildId)

jobs:
- job: Build_Test_And_Push_Az_ARO_Extension
@@ -38,7 +36,7 @@ jobs:
# Build and test the Az ARO Extension
- script: |
set -xe
DOCKER_BUILD_CI_ARGS="--load" make ci-azext-aro VERSION=$(Build.BuildId)
DOCKER_BUILD_CI_ARGS="--load" make ci-azext-aro VERSION=$(VERSION)
displayName: 🛠 Build & Test Az ARO Extension
# Push the image to ACR
@@ -57,7 +55,7 @@ jobs:
# Build and test RP and Portal
- script: |
set -xe
DOCKER_BUILD_CI_ARGS="--load" make ci-rp VERSION=$(Build.BuildId)
DOCKER_BUILD_CI_ARGS="--load" make ci-rp VERSION=$(VERSION)
displayName: 🛠 Build & Test RP and Portal
# Publish test results
66 changes: 45 additions & 21 deletions .pipelines/templates/template-acr-push.yml
@@ -11,7 +11,6 @@ parameters:
default: false

steps:
# Authenticate to ACR and push the image
- task: AzureCLI@2
displayName: 'Authenticate to Azure and Push Docker Image'
inputs:
@@ -20,6 +19,24 @@
scriptLocation: 'inlineScript'
inlineScript: |
set -xe
# Install Docker dependencies
echo "Installing Docker and Docker Compose Plugin..."
sudo apt-get update
sudo apt-get install -y ca-certificates curl gnupg
sudo install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo tee /etc/apt/keyrings/docker.asc
sudo chmod a+r /etc/apt/keyrings/docker.asc
echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
$(. /etc/os-release && echo \"$VERSION_CODENAME\") stable" | \
sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get update
sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
sudo systemctl start docker
sudo systemctl enable docker
# Authenticate to Azure and ACR
echo "Authenticating to Azure and ACR..."
ACR_FQDN="${{ parameters.acrFQDN }}"
REGISTRY_NAME=$(echo $ACR_FQDN | cut -d'.' -f1)
@@ -31,38 +48,45 @@ steps:
echo "Listing Docker images..."
docker images
# Ensure the image is available locally before tagging the build ID
IMAGE_NAME="${{ parameters.repository }}:$(Build.BuildId)"
# Define both the full repository image name and the local name
IMAGE_NAME="${ACR_FQDN}/${{ parameters.repository }}:$(VERSION)"
LOCAL_IMAGE="${{ parameters.repository }}:$(VERSION)"
# Check if the image exists locally with the full repository tag
echo "Checking for image $IMAGE_NAME..."
if [[ "$(docker images -q $IMAGE_NAME 2> /dev/null)" == "" ]]; then
echo "Error: Image $IMAGE_NAME not found. Exiting."
exit 1
# If the full repository tagged image does not exist, check for the local image
echo "Full repository image not found. Checking for local image $LOCAL_IMAGE..."
if [[ "$(docker images -q $LOCAL_IMAGE 2> /dev/null)" == "" ]]; then
echo "Error: Neither $IMAGE_NAME nor $LOCAL_IMAGE found. Exiting."
exit 1
else
# Retag the local image with the full repository path
echo "Local image $LOCAL_IMAGE found. Retagging with full repository path..."
docker tag $LOCAL_IMAGE $IMAGE_NAME
fi
else
echo "Image $IMAGE_NAME found. Proceeding to push..."
fi
# Ensure the image is available locally before tagging 'latest'
IMAGE_LATEST="${{ parameters.repository }}:latest"
IMAGE_LATEST="${ACR_FQDN}/${{ parameters.repository }}:latest"
echo "Checking for image $IMAGE_LATEST..."
if [[ "$(docker images -q $IMAGE_LATEST 2> /dev/null)" == "" ]]; then
echo "Warning: Image $IMAGE_LATEST not found. Skipping latest tag."
echo "Warning: Image $IMAGE_LATEST not found. Skipping 'latest' tag."
SKIP_LATEST=true
else
echo "Image $IMAGE_LATEST found. Proceeding with 'latest' tag."
SKIP_LATEST=false
fi
# Tag the image with the ACR repository for the build ID
echo "Tagging image with build ID..."
docker tag $IMAGE_NAME ${ACR_FQDN}/${{ parameters.repository }}:$(Build.BuildId)
# If the latest image exists, tag it as well
if [ "$SKIP_LATEST" == "false" ]; then
echo "Tagging image with 'latest'..."
docker tag $IMAGE_LATEST ${ACR_FQDN}/${{ parameters.repository }}:latest
fi
# Push the Docker image to ACR with build ID
# Push the Docker image to ACR with the build ID
echo "Pushing image with build ID to ACR..."
docker push ${ACR_FQDN}/${{ parameters.repository }}:$(Build.BuildId)
docker push $IMAGE_NAME
# Optionally push the image as 'latest'
if [ "${{ parameters.pushLatest }}" == "true" ] && [ "$SKIP_LATEST" == "false" ]; then
echo "Pushing 'latest' tag to ACR..."
docker push ${ACR_FQDN}/${{ parameters.repository }}:latest
echo "Tagging image with 'latest' and pushing..."
docker tag $IMAGE_NAME $IMAGE_LATEST
docker push $IMAGE_LATEST
fi
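For context, a job would typically consume this template with a step like the one below. The actual call sites in ci.yml fall outside this hunk, so the path and parameter values are illustrative only, taken from the image names defined in the pipeline variables rather than from this diff.

```yaml
# Hypothetical template invocation from .pipelines/ci.yml; treat the
# relative path and parameter values as an example, not the merged config.
- template: templates/template-acr-push.yml
  parameters:
    acrFQDN: 'arosvcdev.azurecr.io'
    repository: 'aro'
    pushLatest: true
```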
8 changes: 8 additions & 0 deletions Dockerfile.vpn
@@ -0,0 +1,8 @@
# Use a Microsoft-approved image
FROM mcr.microsoft.com/azure-cli:2.61.0 AS base

# Install OpenVPN
USER root
RUN apk add --no-cache openvpn || tdnf install -y openvpn || dnf install -y openvpn

ENTRYPOINT openvpn
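A sketch of how this image might be wired up as a Compose service, assuming the device and privilege options previously attached to the ubi8 CI container (--privileged, /dev/shm, /dev/net/tun) carry over. The docker-compose.yml itself is not part of this diff, so the service name and variable references are assumptions.

```yaml
# Hypothetical vpn service definition; variable names come from the new
# ci.yml pipeline variables, everything else is an assumed shape.
services:
  vpn:
    image: ${LOCAL_VPN_IMAGE}:${VERSION}
    build:
      context: .
      dockerfile: Dockerfile.vpn
    privileged: true
    devices:
      - /dev/net/tun:/dev/net/tun
    volumes:
      - /dev/shm:/dev/shm
```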
2 changes: 2 additions & 0 deletions Dockerfile.vpn.dockerignore
@@ -0,0 +1,2 @@
# ignore everything
*
119 changes: 12 additions & 107 deletions Makefile
@@ -399,29 +399,23 @@ ci-clean:

.PHONY: ci-rp
ci-rp: fix-macos-vendor
docker build . $(DOCKER_BUILD_CI_ARGS) \
docker build . ${DOCKER_BUILD_CI_ARGS} \
-f Dockerfile.ci-rp \
--ulimit=nofile=4096:4096 \
--build-arg REGISTRY=$(REGISTRY) \
--build-arg ARO_VERSION=$(VERSION) \
--no-cache=$(NO_CACHE) \
--build-arg REGISTRY=${REGISTRY} \
--build-arg ARO_VERSION=${VERSION} \
--no-cache=${NO_CACHE} \
--target=builder \
-t $(LOCAL_ARO_RP_BUILD_IMAGE):$(VERSION)
-t ${LOCAL_ARO_RP_BUILD_IMAGE}:${VERSION}

docker build . $(DOCKER_BUILD_CI_ARGS) \
-f Dockerfile.ci-rp \
--ulimit=nofile=4096:4096 \
--build-arg REGISTRY=$(REGISTRY) \
--build-arg ARO_VERSION=$(VERSION) \
-t $(LOCAL_ARO_RP_IMAGE):$(VERSION)
docker compose build rp

# Extract test coverage files from build to local filesystem
docker create --name extract_cover_out $(LOCAL_ARO_RP_BUILD_IMAGE):$(VERSION); \
docker create --name extract_cover_out ${LOCAL_ARO_RP_BUILD_IMAGE}:${VERSION}; \
docker cp extract_cover_out:/app/report.xml ./report.xml; \
docker cp extract_cover_out:/app/coverage.xml ./coverage.xml; \
docker rm extract_cover_out;


.PHONY: ci-tunnel
ci-tunnel: fix-macos-vendor
podman $(PODMAN_REMOTE_ARGS) \
@@ -441,102 +435,13 @@ ifeq ($(shell uname -s),Darwin)
mv ./vendor/github.com/Microsoft ./vendor/github.com/temp-microsoft && mv ./vendor/github.com/temp-microsoft ./vendor/github.com/microsoft || true
endif

.PHONY: podman-secrets
podman-secrets: aks.kubeconfig
podman $(PODMAN_REMOTE_ARGS) secret rm --ignore aks.kubeconfig
podman $(PODMAN_REMOTE_ARGS) secret create aks.kubeconfig ./aks.kubeconfig

podman $(PODMAN_REMOTE_ARGS) secret rm --ignore proxy-client.key
podman $(PODMAN_REMOTE_ARGS) secret create proxy-client.key ./secrets/proxy-client.key

podman $(PODMAN_REMOTE_ARGS) secret rm --ignore proxy-client.crt
podman $(PODMAN_REMOTE_ARGS) secret create proxy-client.crt ./secrets/proxy-client.crt

podman $(PODMAN_REMOTE_ARGS) secret rm --ignore proxy.crt
podman $(PODMAN_REMOTE_ARGS) secret create proxy.crt ./secrets/proxy.crt

.PHONY: run-portal
run-portal: ci-rp podman-secrets
podman $(PODMAN_REMOTE_ARGS) \
run \
--name aro-portal \
--rm \
-p 127.0.0.1:8444:8444 \
-p 127.0.0.1:2222:2222 \
--cap-drop net_raw \
-e RP_MODE \
-e AZURE_SUBSCRIPTION_ID \
-e AZURE_TENANT_ID \
-e LOCATION \
-e RESOURCEGROUP \
-e AZURE_PORTAL_CLIENT_ID \
-e AZURE_PORTAL_ELEVATED_GROUP_IDS \
-e AZURE_PORTAL_ACCESS_GROUP_IDS \
-e AZURE_RP_CLIENT_SECRET \
-e AZURE_RP_CLIENT_ID \
-e KEYVAULT_PREFIX \
-e DATABASE_ACCOUNT_NAME \
-e DATABASE_NAME \
-e NO_NPM=1 \
--secret proxy-client.key,target=/app/secrets/proxy-client.key \
--secret proxy-client.crt,target=/app/secrets/proxy-client.crt \
--secret proxy.crt,target=/app/secrets/proxy.crt \
$(LOCAL_ARO_RP_IMAGE):$(VERSION) portal
run-portal:
docker compose up portal

# run-rp executes the RP locally as similarly as possible to production. That
# includes the use of Hive, meaning you need a VPN connection.
.PHONY: run-rp
run-rp: ci-rp podman-secrets
podman $(PODMAN_REMOTE_ARGS) \
run \
--name aro-rp \
--rm \
-p 127.0.0.1:8443:8443 \
-w /app \
-e ARO_IMAGE \
-e RP_MODE="development" \
-e PROXY_HOSTNAME \
-e DOMAIN_NAME \
-e AZURE_RP_CLIENT_ID \
-e AZURE_FP_CLIENT_ID \
-e AZURE_SUBSCRIPTION_ID \
-e AZURE_TENANT_ID \
-e AZURE_RP_CLIENT_SECRET \
-e LOCATION \
-e RESOURCEGROUP \
-e AZURE_ARM_CLIENT_ID \
-e AZURE_FP_SERVICE_PRINCIPAL_ID \
-e AZURE_DBTOKEN_CLIENT_ID \
-e AZURE_PORTAL_CLIENT_ID \
-e AZURE_PORTAL_ACCESS_GROUP_IDS \
-e AZURE_CLIENT_ID \
-e AZURE_SERVICE_PRINCIPAL_ID \
-e AZURE_CLIENT_SECRET \
-e AZURE_GATEWAY_CLIENT_ID \
-e AZURE_GATEWAY_SERVICE_PRINCIPAL_ID \
-e AZURE_GATEWAY_CLIENT_SECRET \
-e DATABASE_NAME \
-e PULL_SECRET \
-e SECRET_SA_ACCOUNT_NAME \
-e DATABASE_ACCOUNT_NAME \
-e KEYVAULT_PREFIX \
-e ADMIN_OBJECT_ID \
-e PARENT_DOMAIN_NAME \
-e PARENT_DOMAIN_RESOURCEGROUP \
-e AZURE_ENVIRONMENT \
-e STORAGE_ACCOUNT_DOMAIN \
-e OIDC_STORAGE_ACCOUNT_NAME \
-e KUBECONFIG="/app/secrets/aks.kubeconfig" \
-e HIVE_KUBE_CONFIG_PATH="/app/secrets/aks.kubeconfig" \
-e ARO_CHECKOUT_PATH="/app" \
-e ARO_INSTALL_VIA_HIVE="true" \
-e ARO_ADOPT_BY_HIVE="true" \
-e MOCK_MSI_TENANT_ID \
-e MOCK_MSI_CLIENT_ID \
-e MOCK_MSI_OBJECT_ID \
-e MOCK_MSI_CERT \
--secret aks.kubeconfig,target=/app/secrets/aks.kubeconfig \
--secret proxy-client.key,target=/app/secrets/proxy-client.key \
--secret proxy-client.crt,target=/app/secrets/proxy-client.crt \
--secret proxy.crt,target=/app/secrets/proxy.crt \
$(LOCAL_ARO_RP_IMAGE):$(VERSION) rp
run-rp: aks.kubeconfig
docker compose rm -sf rp
docker compose up rp
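The run-rp and run-portal targets now delegate to Compose services named rp and portal. Since the compose file is not included in this diff, the following is a minimal sketch of what those services might look like, reconstructed from the podman flags being removed (published ports, cap-drop, and the rp/portal commands); environment variables and secret mounts are elided.

```yaml
# Hypothetical rp and portal services mirroring the removed podman run flags.
# Only the structure is sketched; image variables come from ci.yml.
services:
  rp:
    image: ${LOCAL_ARO_RP_IMAGE}:${VERSION}
    command: rp
    ports:
      - "127.0.0.1:8443:8443"
  portal:
    image: ${LOCAL_ARO_RP_IMAGE}:${VERSION}
    command: portal
    cap_drop:
      - NET_RAW
    ports:
      - "127.0.0.1:8444:8444"
      - "127.0.0.1:2222:2222"
```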