From 7602f97999661c907a1159b266168dd568599427 Mon Sep 17 00:00:00 2001
From: Samuel Jones
Date: Fri, 12 Apr 2024 15:00:59 +0100
Subject: [PATCH] Fix many issues with deployment of the capi stuff

---
 .gitignore                           |  3 +-
 c-api/README.md                      | 69 ++++------------------------
 c-api/management-cluster/values.yaml | 20 ++++----
 c-api/prod-cluster/user-values.yaml  |  2 +-
 c-api/prod-cluster/values.yaml       | 26 ++++-------
 c-api/staging-cluster/values.yaml    | 25 ++++------
 6 files changed, 35 insertions(+), 110 deletions(-)

diff --git a/.gitignore b/.gitignore
index e780cb9..e5ffb8d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,4 +4,5 @@
 inventory*.ini
 haproxy.cfg
 *clouds*.yaml
-.idea
\ No newline at end of file
+.idea
+*.kubeconfig
diff --git a/c-api/README.md b/c-api/README.md
index 4527520..9414fe8 100644
--- a/c-api/README.md
+++ b/c-api/README.md
@@ -24,61 +24,8 @@ Start minikube
 minikube start
 ```
 
-Configure local cluster to create management cluster
-
-```bash
-clusterctl init --infrastructure openstack
-
-cd management-cluster
-export CLUSTER_NAME="management"
-
-kubectl create namespace clusters
-
-helm repo add capi https://stackhpc.github.io/capi-helm-charts
-helm repo add capi-addons https://stackhpc.github.io/cluster-api-addon-provider
-helm repo update
-
-helm upgrade cluster-api-addon-provider capi-addons/cluster-api-addon-provider --install --wait -n clusters --version 0.3.1
-
-helm upgrade $CLUSTER_NAME capi/openstack-cluster --install -f values.yaml -f clouds.yaml -f user-values.yaml -f flavors.yaml -n clusters
-cd ..
-```
-
-Wait for the above to finish by running this command and waiting for the cluster to report as "Ready: True":
-
-```bash
-watch clusterctl describe cluster -n clusters $CLUSTER_NAME
-```
-
-## Migrate to management cluster from local
-
-Now that the manegement cluster exists we should ensure that it is managed by itself and not a local cluster on our dev machine. Run the following:
-
-```bash
-clusterctl get kubeconfig $CLUSTER_NAME -n clusters > "kubeconfig-$CLUSTER_NAME"
-clusterctl init --infrastructure openstack --kubeconfig="kubeconfig-$CLUSTER_NAME"
-clusterctl move --to-kubeconfig "kubeconfig-$CLUSTER_NAME"
-```
-
-This management cluster will also need the cluster-api-addon-provider so install that now.
-
-```bash
-kubectl create namespace clusters --kubeconfig "kubeconfig-$CLUSTER_NAME"
-helm upgrade cluster-api-addon-provider capi-addons/cluster-api-addon-provider --install --wait -n clusters --version 0.3.1
-helm upgrade $CLUSTER_NAME capi/openstack-cluster --install -f values.yaml -f clouds.yaml -f user-values.yaml -f flavors.yaml --wait -n clusters
-```
-
-Wait for the migration to complete by waiting for the following to report as complete:
-
-```bash
-kubectl get kubeadmcontrolplane --kubeconfig=kubeconfig-$CLUSTER_NAME
-```
-
-Now that we are done with that migration you probably want to run the following command to change your kubeconfig file:
-
-```bash
-export KUBECONFIG=""
-```
+Follow this:
+https://stfc.atlassian.net/wiki/spaces/CLOUDKB/pages/211878034/Cluster+API+Setup#Moving-the-control-plane
 
 ## Staging cluster setup using management
 
@@ -140,17 +87,17 @@ kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/st
 
 Setup ArgoCD CLI: https://argo-cd.readthedocs.io/en/stable/getting_started/#2-download-argo-cd-cli
 
-Create the Service type load balancer:
+Get the initial password:
 ```shell
-kubectl patch svc argocd-server -n argocd -p '{"spec": {"type": "LoadBalancer"}}'
+argocd admin initial-password -n argocd
 ```
 
-Get the initial password:
+Port-forward the UI to port 8080 using this command, allowing temporary access on http://localhost:8080:
 ```shell
-argocd admin initial-password -n argocd
+kubectl port-forward service/argocd-server 8080:80 --namespace=argocd
 ```
 
-Login to the UI using the IP provided by the ArgoCD load balancer this will be an IP inside the firewall. The username is admin, and the password you already have.
+Log in to the UI; the username is admin, and you already have the password.
 
 Change the password using the user settings to the one in Keeper so everyone who needs the password has it available.
 
@@ -186,6 +133,6 @@ argocd --port-forward --port-forward-namespace=argocd cluster add prod --yes
 
 Once you've done this, change the name of the cluster in the UI to just `prod` instead of the long context name.
 
-Then follow the rest of the instructions in the gitops repo for adding the app of apps. These can be found [here](https://github.com/interactivereduction/gitops/blob/main/README.md#how-to-deploy-the-app-of-apps).
+Then follow the rest of the instructions in the gitops repo for adding the app of apps. These can be found [here](https://github.com/fiaisis/gitops/blob/main/README.md#how-to-deploy-the-app-of-apps).
 
 Don't forget to update the sealed secrets in for every app for the new clusters!
\ No newline at end of file
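
Reviewer note, not part of the patch: the README change above replaces the LoadBalancer-based ArgoCD access with a temporary port-forward. A minimal sketch of logging in with the ArgoCD CLI once the initial password exists, using the CLI's own port-forward (the same mechanism the `argocd --port-forward ... cluster add` step later in the README relies on); flag names are the standard ArgoCD CLI ones, adjust to your CLI version:

```shell
# Sketch only: log in via the CLI's built-in port-forward instead of the
# manual kubectl port-forward, using the generated initial admin password.
argocd login --port-forward --port-forward-namespace=argocd \
  --username admin \
  --password "$(argocd admin initial-password -n argocd | head -n 1)"
```
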
diff --git a/c-api/management-cluster/values.yaml b/c-api/management-cluster/values.yaml
index 21eed9a..368ff32 100644
--- a/c-api/management-cluster/values.yaml
+++ b/c-api/management-cluster/values.yaml
@@ -132,7 +132,6 @@ cloudCACert: |
 # The name of the cloud to use from the specified clouds.yaml
 cloudName: openstack
 
-# Values for the Kubernetes cluster network
 kubeNetwork:
   # By default, use the private network range 10.0.0.0/12 for the cluster network
   # We split it into two equally-sized blocks for pods and services
@@ -140,21 +139,12 @@ kubeNetwork:
   # internal net on 172.16.x.y
   pods:
     cidrBlocks:
-      - 10.0.0.0/12
+      - 10.0.0.0/13
   services:
     cidrBlocks:
-      - 10.7.0.0/13
+      - 10.8.0.0/13
   serviceDomain: cluster.local
 
-# Settings for the OpenStack networking for the cluster
-clusterNetworking:
-  # Custom nameservers to use for the hosts
-  dnsNameservers:
-    - 130.246.209.132
-    - 130.246.209.163
-    - 130.246.208.220
-  externalNetworkId: External
-
 # Settings for registry mirrors
 registryMirrors: { docker.io: ["https://dockerhub.stfc.ac.uk"] }
 
@@ -172,6 +162,12 @@ addons:
     enabled: false
   ingress:
     enabled: true
+    nginx:
+      release:
+        values:
+          controller:
+            service:
+              loadBalancerIP: "130.246.214.230"
   openstack:
     enabled: true
     csiCinder:
diff --git a/c-api/prod-cluster/user-values.yaml b/c-api/prod-cluster/user-values.yaml
index 98cbed0..1071d0a 100644
--- a/c-api/prod-cluster/user-values.yaml
+++ b/c-api/prod-cluster/user-values.yaml
@@ -13,7 +13,7 @@ controlPlane:
   # The number of control plane machines to deploy
   # For high-availability, this should be greater than 1
   # For etcd quorum, it should be odd - usually 3, or 5 for very large clusters
-  machineCount: 3
+  machineCount: 5
   # The flavor to use for control plane machines
   machineFlavor: l3.nano
 
diff --git a/c-api/prod-cluster/values.yaml b/c-api/prod-cluster/values.yaml
index e677dc5..8521337 100644
--- a/c-api/prod-cluster/values.yaml
+++ b/c-api/prod-cluster/values.yaml
@@ -133,7 +133,6 @@ cloudCACert: |
 # The name of the cloud to use from the specified clouds.yaml
 cloudName: openstack
 
-# Values for the Kubernetes cluster network
 kubeNetwork:
   # By default, use the private network range 10.0.0.0/12 for the cluster network
   # We split it into two equally-sized blocks for pods and services
@@ -141,27 +140,15 @@ kubeNetwork:
   # internal net on 172.16.x.y
   pods:
     cidrBlocks:
-      - 10.0.0.0/12
+      - 10.0.0.0/13
   services:
     cidrBlocks:
-      - 10.7.0.0/13
+      - 10.8.0.0/13
   serviceDomain: cluster.local
 
-# Settings for the OpenStack networking for the cluster
-clusterNetworking:
-  # Custom nameservers to use for the hosts
-  dnsNameservers:
-    - 130.246.209.132
-    - 130.246.209.163
-    - 130.246.208.220
-  externalNetworkId: External
-
 # Settings for registry mirrors
 registryMirrors: { docker.io: ["https://dockerhub.stfc.ac.uk"] }
 
-# List of additional packages to install on cluster nodes
-additionalPackages: [nfs-common, open-iscsi]
-
 # Settings for the Kubernetes API server
 apiServer:
   # Indicates whether to deploy a load balancer for the API server
@@ -172,13 +159,16 @@ apiServer:
   port: 6443
 
 addons:
-  # Enable monitoring by default, this deploys
-  # https://github.com/stackhpc/capi-helm-charts/blob/main/charts/cluster-addons/README.md#monitoring-and-logging
-  # and includes Loki which is required for central logging as per UKRI policy
   monitoring:
     enabled: false
   ingress:
     enabled: true
+    nginx:
+      release:
+        values:
+          controller:
+            service:
+              loadBalancerIP: "130.246.81.192"
   openstack:
     enabled: true
   cni:
diff --git a/c-api/staging-cluster/values.yaml b/c-api/staging-cluster/values.yaml
index e677dc5..9ba42b8 100644
--- a/c-api/staging-cluster/values.yaml
+++ b/c-api/staging-cluster/values.yaml
@@ -141,27 +141,15 @@ kubeNetwork:
   # internal net on 172.16.x.y
   pods:
     cidrBlocks:
-      - 10.0.0.0/12
+      - 10.0.0.0/13
   services:
     cidrBlocks:
-      - 10.7.0.0/13
+      - 10.8.0.0/13
   serviceDomain: cluster.local
 
-# Settings for the OpenStack networking for the cluster
-clusterNetworking:
-  # Custom nameservers to use for the hosts
-  dnsNameservers:
-    - 130.246.209.132
-    - 130.246.209.163
-    - 130.246.208.220
-  externalNetworkId: External
-
 # Settings for registry mirrors
 registryMirrors: { docker.io: ["https://dockerhub.stfc.ac.uk"] }
 
-# List of additional packages to install on cluster nodes
-additionalPackages: [nfs-common, open-iscsi]
-
 # Settings for the Kubernetes API server
 apiServer:
   # Indicates whether to deploy a load balancer for the API server
@@ -172,13 +160,16 @@ apiServer:
   port: 6443
 
 addons:
-  # Enable monitoring by default, this deploys
-  # https://github.com/stackhpc/capi-helm-charts/blob/main/charts/cluster-addons/README.md#monitoring-and-logging
-  # and includes Loki which is required for central logging as per UKRI policy
   monitoring:
     enabled: false
   ingress:
     enabled: true
+    nginx:
+      release:
+        values:
+          controller:
+            service:
+              loadBalancerIP: "130.246.81.122"
   openstack:
     enabled: true
   cni:
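
Reviewer note, not part of the patch: the kubeNetwork change splits the 10.0.0.0/12 block into two non-overlapping halves, 10.0.0.0/13 for pods and 10.8.0.0/13 for services, whereas the previous 10.7.0.0/13 service range sat inside the 10.0.0.0/12 pod range. The new addons.ingress.nginx values pin the ingress controller to a fixed loadBalancerIP per cluster. A rough post-upgrade check, assuming a kubeconfig file named `kubeconfig-prod` (hypothetical; use whatever `clusterctl get kubeconfig` produced for the cluster you are checking):

```shell
# Hypothetical kubeconfig name; substitute the file generated by
# `clusterctl get kubeconfig <cluster> -n clusters`.
export KUBECONFIG=kubeconfig-prod

# The ingress controller service should report the pinned loadBalancerIP
# (130.246.81.192 for prod) as its external IP once the addon has reconciled.
kubectl get svc --all-namespaces | grep -i nginx

# With machineCount raised to 5 in prod user-values, five control plane nodes
# should register (label applies to kubeadm clusters on recent Kubernetes).
kubectl get nodes -l node-role.kubernetes.io/control-plane
```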