@@ -52,11 +52,11 @@ kubectl --kubeconfig single-server.rke2.yaml get nodes
52
52
# k8s-pool-a-1 Ready <none> 119s v1.21.5+rke2r2
53
53
# k8s-server-1 Ready control-plane,etcd,master 2m22s v1.21.5+rke2r2
54
54
55
+ # get SSH and restore helpers
56
+ terraform output -json
57
+
55
58
# on upgrade, process node pool by node pool
56
59
terraform apply -target=' module.rke2.module.servers["server-a"]'
57
- # for servers, apply on the majority of nodes, then for the remaining ones
58
- # this ensures the load balancer routes are updated as well
59
- terraform apply -target=' module.rke2.openstack_lb_members_v2.k8s'
60
60
```
61
61
62
62
See [ examples] ( ./examples ) for more options or this
@@ -68,78 +68,40 @@ Note: it requires [rsync](https://rsync.samba.org) and
68
68
can disable this behavior by setting ` ff_write_kubeconfig=false ` and fetch
69
69
yourself ` /etc/rancher/rke2/rke2.yaml ` on server nodes.
70
70
71
- ## Infomaniak OpenStack
72
-
73
- A stable, performant and fully equipped Kubernetes cluster in Switzerland for as
74
- little as CHF 26.90/month (at the time of writing):
75
-
76
- - load-balancer with floating IP (perfect under Cloudflare proxy)
77
- - 1 server 2cpu/4Go (= master)
78
- - 1 agent 2cpu/4Go (= worker)
79
-
80
- | Flavour | CHF/month |
81
- | ---------------------------------------------------------------------------------- | --------- |
82
- | 2×5.88 (instances) + 0.09×2×(4+6) (block storage) + 3.34 (IP) + 10 (load-balancer) | 26.90 |
83
- | single 2cpu/4go server with 1x4cpu/16Go worker | ~ 37.— |
84
- | 3x2cpu/4go HA servers with 1x4cpu/16Go worker | ~ 50.— |
85
- | 3x2cpu/4go HA servers with 3x4cpu/16Go workers | ~ 85.— |
86
-
87
- See their technical [ documentation] ( https://docs.infomaniak.cloud ) and
88
- [ pricing] ( https://www.infomaniak.com/fr/hebergement/public-cloud/tarifs ) .
89
-
90
- ## More on RKE2 & OpenStack
91
-
92
- [ RKE2 cheat sheet] ( https://gist.github.com/superseb/3b78f47989e0dbc1295486c186e944bf )
71
+ ## Restoring a backup
93
72
94
73
```
95
- # alias already set on the nodes
96
- crictl
97
- kubectl (server only)
98
-
99
- # logs
100
- sudo systemctl status rke2-server.service
101
- journalctl -f -u rke2-server
102
-
103
- sudo systemctl status rke2-agent.service
104
- journalctl -f -u rke2-agent
105
-
106
- less /var/lib/rancher/rke2/agent/logs/kubelet.log
107
- less /var/lib/rancher/rke2/agent/containerd/containerd.log
108
- less /var/log/cloud-init-output.log
109
-
110
- # restore s3 snapshot (see restore_cmd output of the terraform module)
74
+ # ssh into one of the server nodes (see terraform output -json)
75
+ # restore s3 snapshot (see restore_cmd output of the terraform module):
111
76
sudo systemctl stop rke2-server
112
77
sudo rke2 server --cluster-reset --etcd-s3 --etcd-s3-bucket=BUCKET_NAME --etcd-s3-access-key=ACCESS_KEY --etcd-s3-secret-key=SECRET_KEY --cluster-reset-restore-path=SNAPSHOT_PATH
113
78
sudo systemctl start rke2-server
114
- # remove db on other server nodes
79
+ # exit and ssh on the other server nodes to remove the etcd db
80
+ # (recall that you may need to ssh into one node as a bastion then to the others):
115
81
sudo systemctl stop rke2-server
116
- sudo rm -rf /var/lib/rancher/rke2/server/db
82
+ sudo rm -rf /var/lib/rancher/rke2/server
117
83
sudo systemctl start rke2-server
118
84
# reboot all nodes one by one to make sure all is stable
119
85
sudo reboot
120
-
121
- # check san
122
- openssl s_client -connect 192.168.42.3:10250 </dev/null 2>/dev/null | openssl x509 -inform pem -text
123
-
124
- # defrag etcd
125
- kubectl -n kube-system exec $(kubectl -n kube-system get pod -l component=etcd --no-headers -o custom-columns=NAME:.metadata.name | head -1) -- sh -c "ETCDCTL_ENDPOINTS='https://127.0.0.1:2379' ETCDCTL_CACERT='/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt' ETCDCTL_CERT='/var/lib/rancher/rke2/server/tls/etcd/server-client.crt' ETCDCTL_KEY='/var/lib/rancher/rke2/server/tls/etcd/server-client.key' ETCDCTL_API=3 etcdctl defrag --cluster"
126
-
127
- # increase volume size
128
- # shutdown instance
129
- # detach volumne
130
- # expand volume
131
- # recreate node
132
- terraform apply -target='module.rke2.module.servers["server"]' -replace='module.rke2.module.servers["server"].openstack_compute_instance_v2.instance[0]'
133
86
```
134
87
135
88
## Migration guide
136
89
137
90
### From v2 to v3
138
91
139
92
```
140
- # use the previous patch version to setup an additional san: 192.168.42.4
141
- # this will become the new VIP inside the cluster and replace the load-balancer
142
- # run an full upgrade with it, then switch to the new major, then you can upgrade
93
+ # use the previous patch version (2.0.7) to setup an additional san for 192.168.42.4
94
+ # this will become the new VIP inside the cluster and replace the load-balancer:
95
+ source = "zifeo/rke2/openstack"
96
+ version = "2.0.7"
97
+ # ...
98
+ additional_san = ["192.168.42.4"]
99
+ # run a full upgrade with it, node by node:
100
+ terraform apply -target='module.rke2.module.servers["your-server-pool"]'
101
+ # and so on for each node pool
102
+ # you can now switch to the new major:
103
+ source = "zifeo/rke2/openstack"
104
+ version = "3.0.0"
143
105
# 1. create the new external IP for servers access with:
144
106
terraform apply -target='module.rke2.openstack_networking_floatingip_associate_v2.fip'
145
107
# 2. pick a server different from the initial one (used to bootstrap):
@@ -257,3 +219,60 @@ terraform state rm module.rke2.openstack_networking_floatingip_v2.external
257
219
# 7. continue with other nodes step-by-step and ensure all is up-to-date with a final:
258
220
terraform apply
259
221
```
222
+
223
+ ## Infomaniak OpenStack
224
+
225
+ A stable, performant and fully equipped Kubernetes cluster in Switzerland for as
226
+ little as CHF 16.90/month (at the time of writing):
227
+
228
+ - 1 floating IP for admin access (ssh and kubernetes api)
229
+ - 1 server 2cpu/4Go (= master)
230
+ - 1 agent 2cpu/4Go (= worker)
231
+
232
+ | Flavour | CHF/month |
233
+ | ------------------------------------------------------------- | --------- |
234
+ | 2×5.88 (instances) + 0.09×2×(4+6) (block storage) + 3.34 (IP) | 16.90 |
235
+ | single 2cpu/4go server with 1x4cpu/16Go worker | ~ 27.— |
236
+ | 3x2cpu/4go HA servers with 1x4cpu/16Go worker | ~ 40.— |
237
+ | 3x2cpu/4go HA servers with 3x4cpu/16Go workers | ~ 75.— |
238
+
239
+ You may also want to add a load-balancer and bind an additional floating IP for
240
+ public access (e.g. for an ingress controller like ingress-nginx), that will add
241
+ 10.00 (load-balancer) + 3.34 (IP) = CHF 13.34/month.
242
+
243
+ See their technical [ documentation] ( https://docs.infomaniak.cloud ) and
244
+ [ pricing] ( https://www.infomaniak.com/fr/hebergement/public-cloud/tarifs ) .
245
+
246
+ ## More on RKE2 & OpenStack
247
+
248
+ [ RKE2 cheat sheet] ( https://gist.github.com/superseb/3b78f47989e0dbc1295486c186e944bf )
249
+
250
+ ```
251
+ # alias already set on the nodes
252
+ crictl
253
+ kubectl (server only)
254
+
255
+ # logs
256
+ sudo systemctl status rke2-server.service
257
+ journalctl -f -u rke2-server
258
+
259
+ sudo systemctl status rke2-agent.service
260
+ journalctl -f -u rke2-agent
261
+
262
+ less /var/lib/rancher/rke2/agent/logs/kubelet.log
263
+ less /var/lib/rancher/rke2/agent/containerd/containerd.log
264
+ less /var/log/cloud-init-output.log
265
+
266
+ # check san
267
+ openssl s_client -connect 192.168.42.3:10250 </dev/null 2>/dev/null | openssl x509 -inform pem -text
268
+
269
+ # defrag etcd
270
+ kubectl -n kube-system exec $(kubectl -n kube-system get pod -l component=etcd --no-headers -o custom-columns=NAME:.metadata.name | head -1) -- sh -c "ETCDCTL_ENDPOINTS='https://127.0.0.1:2379' ETCDCTL_CACERT='/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt' ETCDCTL_CERT='/var/lib/rancher/rke2/server/tls/etcd/server-client.crt' ETCDCTL_KEY='/var/lib/rancher/rke2/server/tls/etcd/server-client.key' ETCDCTL_API=3 etcdctl defrag --cluster"
271
+
272
+ # increase volume size
273
+ # shutdown instance
274
+ # detach volume
275
+ # expand volume
276
+ # recreate node
277
+ terraform apply -target='module.rke2.module.servers["server"]' -replace='module.rke2.module.servers["server"].openstack_compute_instance_v2.instance[0]'
278
+ ```
0 commit comments