From 4c3b5bb9185f2762b43c4b12404b3c7c56a6de33 Mon Sep 17 00:00:00 2001
From: Shubhada
Date: Wed, 22 May 2024 16:24:38 -0700
Subject: [PATCH] Add Makefile targets for building, running, and testing RP
 container image locally with Podman

Add local developer tooling for running the RP in a container:

- new Makefile targets build-local-rp-image, runlocal-rp, ensure-aro-image,
  setup, cluster, delete-cluster and e2e for building, running and testing
  the RP container image locally with Podman
- new setup_resources.sh script to create the shared dev resource group,
  virtual network, key vault and disk encryption set
- hack/cluster now creates a dedicated vnet per cluster and deletes it when
  the cluster is deleted or its creation fails

---
 Makefile                              |  84 +++++++++++++++----
 docs/deploy-full-rp-service-in-dev.md |   2 +-
 hack/cluster/cluster.go               | 116 +++++++++++++++++++++++---
 setup_resources.sh                    |  50 +++++++++++
 4 files changed, 223 insertions(+), 29 deletions(-)
 create mode 100755 setup_resources.sh

diff --git a/Makefile b/Makefile
index d263c3e8d85..e13f701559a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 SHELL = /bin/bash
 TAG ?= $(shell git describe --exact-match 2>/dev/null)
 COMMIT = $(shell git rev-parse --short=7 HEAD)$(shell [[ $$(git status --porcelain) = "" ]] || echo -dirty)
-ARO_IMAGE_BASE = ${RP_IMAGE_ACR}.azurecr.io/aro
+ARO_IMAGE_BASE = aro-local
 E2E_FLAGS ?= -test.v --ginkgo.v --ginkgo.timeout 180m --ginkgo.flake-attempts=2 --ginkgo.junit-report=e2e-report.xml
 GO_FLAGS ?= -tags=containers_image_openpgp,exclude_graphdriver_btrfs,exclude_graphdriver_devicemapper
 NO_CACHE ?= true
@@ -19,7 +19,7 @@ GATEKEEPER_IMAGE ?= ${RP_IMAGE_ACR}.azurecr.io/gatekeeper:$(GATEKEEPER_VERSION)
 GOTESTSUM = gotest.tools/gotestsum@v1.11.0
 
 ifneq ($(shell uname -s),Darwin)
-	export CGO_CFLAGS=-Dgpgme_off_t=off_t
+	export CGO_CFLAGS=-Dgpgme_off_t=off_t
 endif
 
 ifeq ($(TAG),)
@@ -57,8 +57,60 @@ build-all:
 aro: check-release generate
 	go build -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./cmd/aro
 
+build-local-rp-image:
+	@echo "Increasing Podman machine resources..."
+	@podman machine stop
+	@podman machine set --cpus 4 --memory 8192
+	@podman machine start
+	@echo "Building local RP image..."
+	podman build . -f Dockerfile.ci-rp --ulimit=nofile=4096:4096 --build-arg REGISTRY=$(REGISTRY) --build-arg ARO_VERSION=$(VERSION) --no-cache=$(NO_CACHE) -t $(ARO_IMAGE)
+
 runlocal-rp:
-	go run -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./cmd/aro rp
+	set -a; source .env; set +a; \
+	$(MAKE) build-local-rp-image; \
+	echo "Starting Podman service if not running..."; \
+	podman machine init || true; \
+	podman machine start || true; \
+	echo "Podman service status:"; \
+	podman machine list; \
+	podman system connection list; \
+	echo "Using ARO_IMAGE: $(ARO_IMAGE)"; \
+	if [ -z "$(ARO_IMAGE)" ]; then echo "Error: ARO_IMAGE is not set"; exit 1; fi; \
+	podman run --rm -d -p 8443:8443 --name aro-rp -e DOMAIN_NAME=$(DOMAIN_NAME) -e AZURE_FP_CLIENT_ID=$(AZURE_FP_CLIENT_ID) $(ARO_IMAGE) rp;
+
+ensure-aro-image:
+	@if ! 
podman image exists $(ARO_IMAGE); then \ + $(MAKE) build-local-rp-image; \ + fi + +setup: + @bash ./setup_resources.sh $(KEY_NAME) $(KEY_VERSION) + +cluster: + @set -a; source .env; set +a; \ + if [ -z "$$CLUSTER" ]; then \ + read -p "Enter the cluster name: " CLUSTER; \ + fi; \ + echo "Using CLUSTER: $$CLUSTER"; \ + export CLUSTER=$$CLUSTER; \ + CGO_ENABLED=0 go run ./hack/cluster/cluster.go create + +delete-cluster: + @set -a; source .env; set +a; \ + if [ -z "$$CLUSTER" ]; then \ + read -p "Enter the cluster name to delete: " CLUSTER; \ + fi; \ + echo "Deleting CLUSTER: $$CLUSTER"; \ + export CLUSTER=$$CLUSTER; \ + CGO_ENABLED=0 go run ./hack/cluster/cluster.go delete + +e2e: ensure-aro-image + @podman run --rm --name aro-e2e --network host \ + -e AZURE_TENANT_ID=$(AZURE_TENANT_ID) \ + -e AZURE_SUBSCRIPTION_ID=$(AZURE_SUBSCRIPTION_ID) \ + -e AZURE_CLIENT_ID=$(AZURE_CLIENT_ID) \ + -e AZURE_CLIENT_SECRET=$(AZURE_CLIENT_SECRET) \ + aro:e2e az: pyenv . pyenv/bin/activate && \ @@ -77,7 +129,8 @@ client: generate hack/build-client.sh "${AUTOREST_IMAGE}" 2020-04-30 2021-09-01-preview 2022-04-01 2022-09-04 2023-04-01 2023-07-01-preview 2023-09-04 2023-11-22 2024-08-12-preview ci-rp: fix-macos-vendor - docker build . -f Dockerfile.ci-rp --ulimit=nofile=4096:4096 --build-arg REGISTRY=$(REGISTRY) --build-arg ARO_VERSION=$(VERSION) --no-cache=$(NO_CACHE) + @echo "Building with VERSION=$(VERSION)" + docker build . -f Dockerfile.ci-rp --ulimit=nofile=4096:4096 --build-arg REGISTRY=$(REGISTRY) --build-arg ARO_VERSION=$(VERSION) --no-cache=$(NO_CACHE) -t $(ARO_IMAGE) # TODO: hard coding dev-config.yaml is clunky; it is also probably convenient to # override COMMIT. @@ -102,6 +155,7 @@ generate: # TODO: This does not work outside of GOROOT. We should replace all usage of the # clientset with controller-runtime so we don't need to generate it. + generate-operator-apiclient: go run ./vendor/k8s.io/code-generator/cmd/client-gen --clientset-name versioned --input-base ./pkg/operator/apis --input aro.openshift.io/v1alpha1,preview.aro.openshift.io/v1alpha1 --output-package ./pkg/operator/clientset --go-header-file ./hack/licenses/boilerplate.go.txt gofmt -s -w ./pkg/operator/clientset @@ -140,8 +194,8 @@ image-gatekeeper: publish-image-aro-multistage: image-aro-multistage docker push $(ARO_IMAGE) ifeq ("${RP_IMAGE_ACR}-$(BRANCH)","arointsvc-master") - docker tag $(ARO_IMAGE) arointsvc.azurecr.io/aro:latest - docker push arointsvc.azurecr.io/aro:latest + docker tag $(ARO_IMAGE) arointsvc.azurecr.io/aro:latest + docker push arointsvc.azurecr.io/aro:latest endif publish-image-autorest: image-autorest @@ -177,10 +231,10 @@ build-portal: pyenv: python3 -m venv pyenv . pyenv/bin/activate && \ - pip install -U pip && \ - pip install -r requirements.txt && \ - azdev setup -r . && \ - sed -i -e "s|^dev_sources = $(PWD)$$|dev_sources = $(PWD)/python|" ~/.azure/config + pip install -U pip && \ + pip install -r requirements.txt && \ + azdev setup -r . 
&& \
+	sed -i -e "s|^dev_sources = $(PWD)$$|dev_sources = $(PWD)/python|" ~/.azure/config
 
 secrets:
 	@[ "${SECRET_SA_ACCOUNT_NAME}" ] || ( echo ">> SECRET_SA_ACCOUNT_NAME is not set"; exit 1 )
@@ -190,7 +244,7 @@ secrets:
 	rm secrets.tar.gz
 
 secrets-update:
-	@[ "${SECRET_SA_ACCOUNT_NAME}" ] || ( echo ">> SECRET_SA_ACCOUNT_NAME is not set"; exit 1 )
+	@[ "${SECRET_SA_ACCOUNT_NAME}" ] || ( echo ">> SECRET_SA_ACCOUNT_NAME is not set"; exit 1 )
 	tar -czf secrets.tar.gz secrets
 	az storage blob upload -n secrets.tar.gz -c secrets -f secrets.tar.gz --overwrite --account-name ${SECRET_SA_ACCOUNT_NAME} >/dev/null
 	rm secrets.tar.gz
@@ -247,9 +301,9 @@ lint-admin-portal:
 
 test-python: pyenv az
 	. pyenv/bin/activate && \
-	azdev linter && \
-	azdev style && \
-	hack/unit-test-python.sh
+	azdev linter && \
+	azdev style && \
+	hack/unit-test-python.sh
 
 shared-cluster-login:
 	@oc login $(shell az aro show -g sre-shared-cluster -n sre-shared-cluster -ojson --query apiserverProfile.url) \
@@ -272,7 +326,7 @@ aks.kubeconfig:
 	hack/get-admin-aks-kubeconfig.sh
 
 vendor:
-	# See comments in the script for background on why we need it
+# See comments in the script for background on why we need it
 	hack/update-go-module-dependencies.sh
 
 install-go-tools:
diff --git a/docs/deploy-full-rp-service-in-dev.md b/docs/deploy-full-rp-service-in-dev.md
index 0d06d4cf533..a92c7f3633c 100644
--- a/docs/deploy-full-rp-service-in-dev.md
+++ b/docs/deploy-full-rp-service-in-dev.md
@@ -338,7 +338,7 @@
 export RESOURCEGROUP=myResourceGroup
 ```
 
-1. Create the resource group if it doesn't exist
+1. Create the resource group if it doesn't exist
 ```bash
 az group create --resource-group $RESOURCEGROUP --location $LOCATION
 ```
diff --git a/hack/cluster/cluster.go b/hack/cluster/cluster.go
index fd822d32dd2..d64b6cf1a21 100644
--- a/hack/cluster/cluster.go
+++ b/hack/cluster/cluster.go
@@ -17,12 +17,28 @@ import (
 	msgraph_errors "github.com/Azure/ARO-RP/pkg/util/graph/graphsdk/models/odataerrors"
 	utillog "github.com/Azure/ARO-RP/pkg/util/log"
 	"github.com/Azure/ARO-RP/pkg/util/version"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
+	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork"
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
 )
 
 const (
 	Cluster = "CLUSTER"
 )
 
+func main() {
+	log := utillog.GetLogger()
+
+	if err := run(context.Background(), log); err != nil {
+		if oDataError, ok := err.(msgraph_errors.ODataErrorable); ok {
+			spew.Dump(oDataError.GetErrorEscaped())
+		}
+		log.Fatal(err)
+	}
+}
+
 func run(ctx context.Context, log *logrus.Entry) error {
 	if len(os.Args) != 2 {
 		return fmt.Errorf("usage: CLUSTER=x %s {create,createApp,deleteApp,delete}", os.Args[0])
@@ -37,11 +53,43 @@ func run(ctx context.Context, log *logrus.Entry) error {
 		return err
 	}
 
-	vnetResourceGroup := os.Getenv("RESOURCEGROUP") // TODO: remove this when we deploy and peer a vnet per cluster create
-	if os.Getenv("CI") != "" {
-		vnetResourceGroup = os.Getenv(Cluster)
-	}
 	clusterName := os.Getenv(Cluster)
+	vnetName := fmt.Sprintf("%s-vnet", clusterName)
+	resourceGroup := os.Getenv("RESOURCEGROUP")
+	location := os.Getenv("LOCATION")
+
+	cred, err := azidentity.NewDefaultAzureCredential(nil)
+	if err != nil {
+		return fmt.Errorf("failed to obtain a credential: %w", err)
+	}
+
+	resourceClient, err := armresources.NewResourceGroupsClient(os.Getenv("AZURE_SUBSCRIPTION_ID"), cred, nil)
+	if err != nil {
+		return fmt.Errorf("failed to create resource group client: %w", err)
+	}
+
+	
vnetClient, err := armnetwork.NewVirtualNetworksClient(os.Getenv("AZURE_SUBSCRIPTION_ID"), cred, nil)
+	if err != nil {
+		return fmt.Errorf("failed to create virtual network client: %w", err)
+	}
+
+	// Create the resource group
+	log.Infof("Creating resource group %s in location %s...", resourceGroup, location)
+	_, err = resourceClient.CreateOrUpdate(ctx, resourceGroup, armresources.ResourceGroup{
+		Location: to.Ptr(location),
+	}, nil)
+	if err != nil {
+		return fmt.Errorf("failed to create resource group: %w", err)
+	}
+
+	// Create the virtual network
+	log.Infof("Creating virtual network %s in resource group %s...", vnetName, resourceGroup)
+	err = createVNet(ctx, log, vnetClient, resourceGroup, vnetName, location)
+	if err != nil {
+		return err
+	}
+
+	log.Infof("Created virtual network %s in resource group %s", vnetName, resourceGroup)
 
 	osClusterVersion := os.Getenv("OS_CLUSTER_VERSION")
 	if osClusterVersion == "" {
@@ -58,25 +106,67 @@ func run(ctx context.Context, log *logrus.Entry) error {
 
 	switch strings.ToLower(os.Args[1]) {
 	case "create":
-		return c.Create(ctx, vnetResourceGroup, clusterName, osClusterVersion)
+		err = c.Create(ctx, resourceGroup, clusterName, osClusterVersion)
+		if err != nil {
+			// If cluster creation fails, delete the created VNet
+			deleteVNet(ctx, log, vnetClient, resourceGroup, vnetName)
+		}
+		return err
 	case "createapp":
 		return c.CreateApp(ctx, clusterName)
 	case "deleteapp":
 		return c.DeleteApp(ctx)
 	case "delete":
-		return c.Delete(ctx, vnetResourceGroup, clusterName)
+		err = c.Delete(ctx, resourceGroup, clusterName)
+		if err == nil {
+			// If cluster deletion succeeds, delete the created VNet
+			deleteVNet(ctx, log, vnetClient, resourceGroup, vnetName)
+		}
+		return err
 	default:
 		return fmt.Errorf("invalid command %s", os.Args[1])
 	}
 }
 
-func main() {
-	log := utillog.GetLogger()
+func createVNet(ctx context.Context, log *logrus.Entry, vnetClient *armnetwork.VirtualNetworksClient, resourceGroup string, vnetName string, location string) error {
+	poller, err := vnetClient.BeginCreateOrUpdate(ctx, resourceGroup, vnetName, armnetwork.VirtualNetwork{
+		Location: to.Ptr(location),
+		Properties: &armnetwork.VirtualNetworkPropertiesFormat{
+			AddressSpace: &armnetwork.AddressSpace{
+				AddressPrefixes: []*string{
+					to.Ptr("10.0.0.0/16"),
+				},
+			},
+			Subnets: []*armnetwork.Subnet{
+				{
+					Name: to.Ptr("master"),
+					Properties: &armnetwork.SubnetPropertiesFormat{
+						AddressPrefix: to.Ptr("10.0.0.0/24"),
+					},
+				},
+				{
+					Name: to.Ptr("worker"),
+					Properties: &armnetwork.SubnetPropertiesFormat{
+						AddressPrefix: to.Ptr("10.0.1.0/24"),
+					},
+				},
+			},
+		},
+	}, nil)
+	if err != nil {
+		log.Errorf("Failed to create VNet: %v", err)
+		return err
+	}
+	_, err = poller.PollUntilDone(ctx, nil) // wait until the VNet is fully provisioned before the cluster create uses it
+	return err
+}
 
-	if err := run(context.Background(), log); err != nil {
-		if oDataError, ok := err.(msgraph_errors.ODataErrorable); ok {
-			spew.Dump(oDataError.GetErrorEscaped())
-		}
-		log.Fatal(err)
+func deleteVNet(ctx context.Context, log *logrus.Entry, vnetClient *armnetwork.VirtualNetworksClient, resourceGroup string, vnetName string) error {
+	poller, err := vnetClient.BeginDelete(ctx, resourceGroup, vnetName, nil)
+	if err != nil {
+		log.Errorf("Failed to delete VNet: %v", err)
+		return err
 	}
+	_, err = poller.PollUntilDone(ctx, nil) // wait until the deletion completes
+	return err
 }
diff --git a/setup_resources.sh b/setup_resources.sh
new file mode 100755
index 00000000000..066c92bd0c8
--- /dev/null
+++ b/setup_resources.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+# Load environment 
variables from .env file
+set -a
+source .env
+set +a
+
+# Set cluster-specific environment variables
+RESOURCEGROUP="v4-westeurope"
+DATABASE_ACCOUNT_NAME="${USER}-aro-${LOCATION}"
+KEYVAULT_PREFIX="${USER}-aro-${LOCATION}"
+
+# Create resource group
+echo "Ensuring resource group $RESOURCEGROUP in $LOCATION..."
+az group show --name "$RESOURCEGROUP" &>/dev/null
+if [ $? -ne 0 ]; then
+    az group create --name "$RESOURCEGROUP" --location "$LOCATION"
+else
+    echo "Resource group $RESOURCEGROUP already exists."
+fi
+
+# Create virtual network
+echo "Ensuring virtual network dev-vnet in resource group $RESOURCEGROUP..."
+az network vnet show --resource-group "$RESOURCEGROUP" --name dev-vnet &>/dev/null
+if [ $? -ne 0 ]; then
+    az network vnet create --resource-group "$RESOURCEGROUP" --name dev-vnet --address-prefix 10.0.0.0/16 --subnet-name dev-subnet --subnet-prefix 10.0.0.0/24
+else
+    echo "Virtual network dev-vnet already exists."
+fi
+
+# Create key vault (purge protection is required for disk encryption set keys)
+echo "Ensuring key vault ${KEYVAULT_PREFIX}-kv in resource group $RESOURCEGROUP..."
+az keyvault show --name "${KEYVAULT_PREFIX}-kv" --resource-group "$RESOURCEGROUP" &>/dev/null
+if [ $? -ne 0 ]; then
+    az keyvault create --name "${KEYVAULT_PREFIX}-kv" --resource-group "$RESOURCEGROUP" --location "$LOCATION" --enable-purge-protection true
+else
+    echo "Key vault ${KEYVAULT_PREFIX}-kv already exists."
+fi
+
+# Create disk encryption set
+KEY_URL="https://${KEYVAULT_PREFIX}-kv.vault.azure.net/keys/${1}/${2}" # key name and version are passed in by the Makefile "setup" target
+echo "Ensuring disk encryption set ${RESOURCEGROUP}-disk-encryption-set in resource group $RESOURCEGROUP..."
+az disk-encryption-set show --name "${RESOURCEGROUP}-disk-encryption-set" --resource-group "$RESOURCEGROUP" &>/dev/null
+if [ $? -ne 0 ]; then
+    az disk-encryption-set create --name "${RESOURCEGROUP}-disk-encryption-set" --resource-group "$RESOURCEGROUP" --location "$LOCATION" --source-vault "/subscriptions/$AZURE_SUBSCRIPTION_ID/resourceGroups/$RESOURCEGROUP/providers/Microsoft.KeyVault/vaults/${KEYVAULT_PREFIX}-kv" --key-url "$KEY_URL"
+else
+    echo "Disk encryption set ${RESOURCEGROUP}-disk-encryption-set already exists."
+fi
+
+echo "Resource setup completed."