Skip to content

Commit

Permalink
Merge pull request #770 from chrischdi/pr-cherry-pick-763-to-release-1.23
Browse files Browse the repository at this point in the history

[release-1.23] CI: move to new resource pool and folder in VMC and migrate to new IPAM
  • Loading branch information
k8s-ci-robot authored Sep 29, 2023
2 parents 7536e55 + 5c7697a commit 627b539
Show file tree
Hide file tree
Showing 5 changed files with 59 additions and 32 deletions.
2 changes: 1 addition & 1 deletion charts/vsphere-cpi/templates/daemonset.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- else }}
node-role.kubernetes.io/control-plane: "true"
node-role.kubernetes.io/control-plane: ""
{{- end }}
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
Expand Down
68 changes: 43 additions & 25 deletions hack/e2e.sh
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,8 @@ REPO_ROOT=$(git rev-parse --show-toplevel)
on_exit() {
# release IPClaim
echo "Releasing IP claims"
kubectl --kubeconfig="${KUBECONFIG}" delete "$(append_api_group ipclaim)" "${IPCLAIM_NAME}" || true
kubectl --kubeconfig="${KUBECONFIG}" delete "$(append_api_group ipclaim)" "${WORKLOAD_IPCLAIM_NAME}" || true
kubectl --kubeconfig="${KUBECONFIG}" delete "ipaddressclaim.ipam.cluster.x-k8s.io" "${CONTROL_PLANE_IPCLAIM_NAME}" || true
kubectl --kubeconfig="${KUBECONFIG}" delete "ipaddressclaim.ipam.cluster.x-k8s.io" "${WORKLOAD_IPCLAIM_NAME}" || true

# kill the VPN
docker kill vpn
Expand Down Expand Up @@ -60,36 +60,54 @@ docker logs vpn
# Sleep to allow vpn container to start running
sleep 30

function append_api_group() {
resource=$1
echo "${resource}.ipam.metal3.io"
# Poll `kubectl get` for an ipam.cluster.x-k8s.io object until the given
# jsonpath resolves to a non-empty value, retrying once per second for up
# to 30 seconds.
# Globals:   KUBECONFIG (read) - kubeconfig used for every kubectl call
# Arguments: $1 - object kind (e.g. ipaddressclaim); the API group suffix
#                 ".ipam.cluster.x-k8s.io" is appended to form the
#                 fully-qualified resource name
#            $2 - object name
#            $3 - jsonpath expression (e.g. '{@.status.addressRef.name}')
# Outputs:   the resolved value on stdout; a diagnostic on stderr on failure
# Returns:   0 on success, 1 if the value is still empty after 30 attempts
function kubectl_get_jsonpath() {
  local OBJECT_KIND="${1}"
  local OBJECT_NAME="${2}"
  local JSON_PATH="${3}"
  # fix: OUTPUT was an implicit global and leaked into the caller's scope;
  # also quote ${n} in the numeric test below.
  local OUTPUT=""
  local n=0
  until [ "${n}" -ge 30 ]; do
    # NOTE(review): if kubectl itself exits non-zero (e.g. the object does not
    # exist yet), this assignment propagates that status — confirm the
    # script's `set -e` configuration tolerates a failing attempt here.
    OUTPUT=$(kubectl --kubeconfig="${KUBECONFIG}" get "${OBJECT_KIND}.ipam.cluster.x-k8s.io" "${OBJECT_NAME}" -o=jsonpath="${JSON_PATH}")
    if [[ "${OUTPUT}" != "" ]]; then
      break
    fi
    n=$((n + 1))
    sleep 1
  done

  if [[ "${OUTPUT}" == "" ]]; then
    echo "Received empty output getting ${JSON_PATH} from ${OBJECT_KIND}/${OBJECT_NAME}" 1>&2
    return 1
  fi
  echo "${OUTPUT}"
  return 0
}

# Retrieve an IP to be used as the kube-vip IP
KUBECONFIG="/root/ipam-conf/capv-services.conf"
# Create an IPAddressClaim from the repo template and print the IP address
# that the IPAM controller binds to it.
# Globals:   KUBECONFIG (read), REPO_ROOT (read), BUILD_ID (read),
#            JOB_NAME (read), IPCLAIM_NAME (written and exported),
#            IPADDRESS_NAME (written)
# Arguments: $1 - name to give the IPAddressClaim
# Outputs:   the claimed IP address on stdout (the only stdout output, so
#            callers can capture it with $(claim_ip ...)); kubectl create
#            output is redirected to stderr
function claim_ip() {
IPCLAIM_NAME="$1"
# NOTE(review): exported rather than local — presumably so other parts of the
# script (or child processes) can see the claim name; confirm against callers
# before tightening the scope.
export IPCLAIM_NAME
# Substitute the template's ${IPCLAIM_NAME}/${BUILD_ID}/${JOB_NAME}
# placeholders and create the claim; 1>&2 keeps create output off stdout.
sed \
-e "s/\${IPCLAIM_NAME}/${IPCLAIM_NAME}/" \
-e "s/\${BUILD_ID}/${BUILD_ID}/" \
-e "s/\${JOB_NAME}/${JOB_NAME}/" \
"${REPO_ROOT}/hack/ipclaim-template.yaml" | kubectl --kubeconfig="${KUBECONFIG}" create -f - 1>&2
# Wait for the controller to bind an IPAddress to the claim, then print
# the bound address itself.
IPADDRESS_NAME=$(kubectl_get_jsonpath ipaddressclaim "${IPCLAIM_NAME}" '{@.status.addressRef.name}')
kubectl --kubeconfig="${KUBECONFIG}" get "ipaddresses.ipam.cluster.x-k8s.io" "${IPADDRESS_NAME}" -o=jsonpath='{@.spec.address}'
}

function acquire_ip_for_management_cluster_cp() {
IPCLAIM_NAME="ip-claim-$(openssl rand -hex 20)"
sed "s/IPCLAIM_NAME/${IPCLAIM_NAME}/" "${REPO_ROOT}/hack/ipclaim-template.yaml" | kubectl --kubeconfig=${KUBECONFIG} create -f -
export KUBECONFIG="/root/ipam-conf/capv-services.conf"

IPADDRESS_NAME=$(kubectl --kubeconfig=${KUBECONFIG} get "$(append_api_group ipclaim)" "${IPCLAIM_NAME}" -o=jsonpath='{@.status.address.name}')
CONTROL_PLANE_ENDPOINT_IP=$(kubectl --kubeconfig=${KUBECONFIG} get "$(append_api_group ipaddresses)" "${IPADDRESS_NAME}" -o=jsonpath='{@.spec.address}')
export CONTROL_PLANE_ENDPOINT_IP
echo "Acquired Control Plane IP: $CONTROL_PLANE_ENDPOINT_IP"
}

function acquire_ip_for_workload_cluster_cp() {
WORKLOAD_IPCLAIM_NAME="workload-ip-claim-$(openssl rand -hex 20)"
sed "s/IPCLAIM_NAME/${WORKLOAD_IPCLAIM_NAME}/" "${REPO_ROOT}/hack/ipclaim-template.yaml" | kubectl --kubeconfig=${KUBECONFIG} create -f -
# Retrieve an IP to be used as the kube-vip IP
CONTROL_PLANE_IPCLAIM_NAME="ip-claim-$(openssl rand -hex 20)"
CONTROL_PLANE_ENDPOINT_IP=$(claim_ip "${CONTROL_PLANE_IPCLAIM_NAME}")

WORKLOAD_IPADDRESS_NAME=$(kubectl --kubeconfig=${KUBECONFIG} get "$(append_api_group ipclaim)" "${WORKLOAD_IPCLAIM_NAME}" -o=jsonpath='{@.status.address.name}')
WORKLOAD_CONTROL_PLANE_ENDPOINT_IP=$(kubectl --kubeconfig=${KUBECONFIG} get "$(append_api_group ipaddresses)" "${WORKLOAD_IPADDRESS_NAME}" -o=jsonpath='{@.spec.address}')
export WORKLOAD_CONTROL_PLANE_ENDPOINT_IP
echo "Acquired Workload Cluster Control Plane IP: $WORKLOAD_CONTROL_PLANE_ENDPOINT_IP"
}
# Retrieve an IP to be used for the workload cluster in v1a3/v1a4 -> v1b1 upgrade tests
WORKLOAD_IPCLAIM_NAME="workload-ip-claim-$(openssl rand -hex 20)"
WORKLOAD_CONTROL_PLANE_ENDPOINT_IP=$(claim_ip "${WORKLOAD_IPCLAIM_NAME}")

acquire_ip_for_management_cluster_cp
acquire_ip_for_workload_cluster_cp
export CONTROL_PLANE_ENDPOINT_IP
export WORKLOAD_CONTROL_PLANE_ENDPOINT_IP

GCR_KEY_FILE="${GCR_KEY_FILE:-}"
login
Expand Down
13 changes: 9 additions & 4 deletions hack/ipclaim-template.yaml
Original file line number Diff line number Diff line change
@@ -1,7 +1,12 @@
apiVersion: ipam.metal3.io/v1alpha1
kind: IPClaim
apiVersion: ipam.cluster.x-k8s.io/v1alpha1
kind: IPAddressClaim
metadata:
name: IPCLAIM_NAME
name: ${IPCLAIM_NAME}
annotations:
prow.k8s.io/build-id: "${BUILD_ID}"
prow.k8s.io/job: "${JOB_NAME}"
spec:
pool:
poolRef:
apiGroup: ipam.cluster.x-k8s.io
kind: InClusterIPPool
name: capv-e2e-ippool
4 changes: 4 additions & 0 deletions hack/release.sh
Original file line number Diff line number Diff line change
Expand Up @@ -154,10 +154,14 @@ function sha_sum() {
# Upload the linux/amd64 cloud-controller-manager binary (and its sha256
# checksum) to the release bucket for this build type, unless the object
# already exists in GCS.
# Globals:   BUILD_RELEASE_TYPE (read), VERSION (read)
# Outputs:   progress messages on stdout
function push_ccm_bin() {
  local bucket="vsphere-cpi-${BUILD_RELEASE_TYPE}"
  # Hoist the repeated source/destination paths into named locals.
  local src=".build/bin/vsphere-cloud-controller-manager.linux_amd64"
  local dst="gs://${bucket}/${VERSION}/bin/linux/amd64/vsphere-cloud-controller-manager"

  if gsutil -q stat "${dst}"; then
    # Already published for this VERSION — do not overwrite.
    echo "${dst} exists, skip pushing"
  else
    # Generate the .sha256 file alongside the binary, then push both.
    sha_sum "${src}"
    echo "copying ccm version ${VERSION} to ${bucket}"
    gsutil cp "${src}" "${dst}"
    gsutil cp "${src}.sha256" "${dst}.sha256"
  fi
}

# Start of main script
Expand Down
4 changes: 2 additions & 2 deletions test/e2e/config/vsphere-ci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -147,8 +147,8 @@ variables:
# Following CAPV variables should be set before testing
VSPHERE_TLS_THUMBPRINT: "18:EC:35:60:54:68:92:F6:F8:92:3E:4D:11:A1:0D:13:9C:E9:3E:B6"
VSPHERE_DATACENTER: "SDDC-Datacenter"
VSPHERE_FOLDER: "clusterapi"
VSPHERE_RESOURCE_POOL: "clusterapi"
VSPHERE_FOLDER: "cloud-provider-vsphere"
VSPHERE_RESOURCE_POOL: "cloud-provider-vsphere"
VSPHERE_DATASTORE: "WorkloadDatastore"
VSPHERE_STORAGE_POLICY: "Cluster API vSphere Storage Policy"
VSPHERE_NETWORK: "sddc-cgw-network-6"
Expand Down

0 comments on commit 627b539

Please sign in to comment.