diff --git a/Dockerfile.windows b/Dockerfile.windows
index 4b9afb8dbc..879de30f15 100644
--- a/Dockerfile.windows
+++ b/Dockerfile.windows
@@ -45,7 +45,9 @@ ARG KUBERNETES_VERSION=dev
 # windows runtime image
 ENV CRICTL_VERSION="v1.27.0"
 ENV CALICO_VERSION="v3.26.1"
-ENV CNI_PLUGIN_VERSION="v1.1.1"
+ENV CNI_PLUGIN_VERSION="v1.4.0"
+ENV FLANNEL_VERSION="v0.24.2"
+ENV CNI_FLANNEL_VERSION="v1.4.0-flannel1"
 
 RUN mkdir -p rancher
 
@@ -87,6 +89,8 @@ RUN sha256sum -c kube-proxy.exe.sha256
 RUN mv kube-proxy.exe rancher/
 
 RUN curl -sLO https://github.com/projectcalico/calico/releases/download/${CALICO_VERSION}/calico-windows-${CALICO_VERSION}.zip
+RUN curl -sL https://github.com/flannel-io/flannel/releases/download/${FLANNEL_VERSION}/flanneld.exe -o rancher/flanneld.exe
+RUN curl -sL https://github.com/flannel-io/cni-plugin/releases/download/${CNI_FLANNEL_VERSION}/flannel-amd64.exe -o rancher/flannel.exe
 RUN curl -sL https://github.com/Microsoft/SDN/raw/master/Kubernetes/windows/hns.psm1 -o rancher/hns.psm1
 
 RUN CONTAINERD_VERSION=$(grep "rancher/hardened-containerd" Dockerfile | grep ':v' | cut -d '=' -f 2- | grep -oE "([0-9]+)\.([0-9]+)\.([0-9]+)") \
diff --git a/bundle/bin/rke2-killall.sh b/bundle/bin/rke2-killall.sh
index ce92b58ede..0ce58b5ea8 100755
--- a/bundle/bin/rke2-killall.sh
+++ b/bundle/bin/rke2-killall.sh
@@ -71,6 +71,7 @@ ip link show 2>/dev/null | grep 'master cni0' | while read ignore iface ignore;
 done
 ip link delete cni0
 ip link delete flannel.1
+ip link delete flannel.4096
 ip link delete flannel-v6.1
 ip link delete flannel-wg
 ip link delete flannel-wg-v6
diff --git a/bundle/bin/rke2-uninstall.ps1 b/bundle/bin/rke2-uninstall.ps1
index 239ea4806d..50b8b83818 100644
--- a/bundle/bin/rke2-uninstall.ps1
+++ b/bundle/bin/rke2-uninstall.ps1
@@ -102,7 +102,7 @@ function Invoke-HNSRequest {
 Write-Host "Beginning the uninstall process"
 
 function Stop-Processes () {
-    $ProcessNames = @('rke2', 'kube-proxy', 'kubelet', 'containerd', 'wins', 'calico-node')
+    $ProcessNames = @('rke2', 'kube-proxy', 'kubelet', 'containerd', 'wins', 'calico-node', 'flanneld')
     foreach ($ProcessName in $ProcessNames) {
         Write-LogInfo "Checking if $ProcessName process exists"
         if ((Get-Process -Name $ProcessName -ErrorAction SilentlyContinue)) {
@@ -147,7 +147,7 @@ function Invoke-CleanServices () {
 
 function Reset-HNS () {
     try {
-        Get-HnsNetwork | Where-Object { $_.Name -eq 'Calico' -or $_.Name -eq 'vxlan0' -or $_.Name -eq 'nat' -or $_.Name -eq 'External' } | Select-Object Name, ID | ForEach-Object {
+        Get-HnsNetwork | Where-Object { $_.Name -eq 'Calico' -or $_.Name -eq 'vxlan0' -or $_.Name -eq 'nat' -or $_.Name -eq 'External' -or $_.Name -eq 'flannel.4096' } | Select-Object Name, ID | ForEach-Object {
             Write-LogInfo "Cleaning up HnsNetwork $($_.Name) ..."
             hnsdiag delete networks $($_.ID)
         }
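The cleanup scripts above now tear down the `flannel.4096` link and HNS network that the Windows flannel backend creates. For reference, a minimal sketch of how that HNS network can be located from Go, using the same `hcsshim.GetHNSNetworkByName` call that `ReserveSourceVip` relies on later in this diff; the standalone program is illustrative only:

```go
//go:build windows

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim"
)

func main() {
	// "flannel.4096" is the overlay network name used by the Windows flannel backend.
	network, err := hcsshim.GetHNSNetworkByName("flannel.4096")
	if err != nil {
		fmt.Println("flannel.4096 HNS network not found:", err)
		return
	}
	fmt.Printf("found HNS network %s (id %s, management IP %s)\n",
		network.Name, network.Id, network.ManagementIP)
}
```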
diff --git a/charts/chart_versions.yaml b/charts/chart_versions.yaml
index 1f00b2e85f..d653781a5d 100644
--- a/charts/chart_versions.yaml
+++ b/charts/chart_versions.yaml
@@ -23,6 +23,9 @@ charts:
   - version: v4.0.2-build2023081108
     filename: /charts/rke2-multus.yaml
     bootstrap: true
+  - version: v0.24.201
+    filename: /charts/rke2-flannel.yaml
+    bootstrap: true
   - version: 1.5.100
     filename: /charts/rancher-vsphere-cpi.yaml
     bootstrap: true
diff --git a/pkg/cli/cmds/server.go b/pkg/cli/cmds/server.go
index 2aa12e0a39..439a6dc855 100644
--- a/pkg/cli/cmds/server.go
+++ b/pkg/cli/cmds/server.go
@@ -17,7 +17,7 @@ const (
 
 var (
 	DisableItems = []string{"rke2-coredns", "rke2-ingress-nginx", "rke2-metrics-server"}
-	CNIItems     = []string{"calico", "canal", "cilium"}
+	CNIItems     = []string{"calico", "canal", "cilium", "flannel"}
 
 	config = rke2.Config{}
 
diff --git a/pkg/pebinaryexecutor/pebinary.go b/pkg/pebinaryexecutor/pebinary.go
index 3e0da37c2d..7c3c88ac99 100644
--- a/pkg/pebinaryexecutor/pebinary.go
+++ b/pkg/pebinaryexecutor/pebinary.go
@@ -62,10 +62,11 @@ type CloudProviderConfig struct {
 }
 
 const (
-	CNINone   = "none"
-	CNICalico = "calico"
-	CNICilium = "cilium"
-	CNICanal  = "canal"
+	CNINone    = "none"
+	CNICalico  = "calico"
+	CNICilium  = "cilium"
+	CNICanal   = "canal"
+	CNIFlannel = "flannel"
 )
 
 // Bootstrap prepares the binary executor to run components by setting the system default registry
@@ -111,16 +112,21 @@ func (p *PEBinaryConfig) Bootstrap(ctx context.Context, nodeConfig *daemonconfig
 
 	switch p.CNIName {
 	case "", CNICalico:
+		logrus.Info("Setting up Calico CNI")
 		p.CNIPlugin = &win.Calico{}
-		if err := p.CNIPlugin.Setup(ctx, nodeConfig, restConfig, p.DataDir); err != nil {
-			return err
-		}
+	case CNIFlannel:
+		logrus.Info("Setting up Flannel CNI")
+		p.CNIPlugin = &win.Flannel{}
 	case CNINone:
 		logrus.Info("Skipping CNI setup")
 	default:
 		logrus.Fatal("Unsupported CNI: ", p.CNIName)
 	}
 
+	if err := p.CNIPlugin.Setup(ctx, nodeConfig, restConfig, p.DataDir); err != nil {
+		return err
+	}
+
 	// required to initialize KubeProxy
 	p.KubeConfigKubeProxy = nodeConfig.AgentConfig.KubeConfigKubeProxy
 
@@ -192,7 +198,6 @@ func (p *PEBinaryConfig) KubeProxy(ctx context.Context, args []string) error {
 	}
 	logrus.Infof("Reserved VIP for kube-proxy: %s", vip)
 
-
 	extraArgs := map[string]string{
 		"network-name": CNIConfig.OverlayNetName,
 		"bind-address": CNIConfig.NodeIP,
@@ -290,6 +295,8 @@ func getCNIPluginName(restConfig *rest.Config) (string, error) {
 		switch h.Name {
 		case win.CalicoChart:
 			return CNICalico, nil
+		case win.FlannelChart:
+			return CNIFlannel, nil
 		case "rke2-cilium":
 			return CNICilium, nil
 		case "rke2-canal":
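With this change the executor drives both Windows CNIs through the shared `p.CNIPlugin` value and only the concrete type differs per chart. The sketch below illustrates the selection pattern with a hypothetical, trimmed-down interface (the real interface lives in `pkg/windows` and is not shown in this diff). Note that, as the hunk above stands, the `p.CNIPlugin.Setup` call after the switch is also reached when the CNI is `none`, where no plugin has been assigned.

```go
package main

import (
	"context"
	"fmt"
)

// cniPlugin is a hypothetical stand-in for the real plugin interface.
type cniPlugin interface {
	Setup(ctx context.Context) error
}

type calico struct{}
type flannel struct{}

func (c *calico) Setup(ctx context.Context) error  { return nil }
func (f *flannel) Setup(ctx context.Context) error { return nil }

// pluginFor mirrors the switch in Bootstrap: an empty name defaults to Calico,
// "none" selects no plugin at all.
func pluginFor(name string) (cniPlugin, error) {
	switch name {
	case "", "calico":
		return &calico{}, nil
	case "flannel":
		return &flannel{}, nil
	case "none":
		return nil, nil
	default:
		return nil, fmt.Errorf("unsupported CNI: %s", name)
	}
}

func main() {
	p, err := pluginFor("flannel")
	if err != nil {
		panic(err)
	}
	if p != nil { // skip Setup when the CNI is "none"
		_ = p.Setup(context.Background())
	}
}
```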
diff --git a/pkg/windows/calico.go b/pkg/windows/calico.go
index 1851e0e9ac..7981232fdf 100644
--- a/pkg/windows/calico.go
+++ b/pkg/windows/calico.go
@@ -33,12 +33,6 @@ import (
 )
 
 var (
-	replaceSlashWin = template.FuncMap{
-		"replace": func(s string) string {
-			return strings.ReplaceAll(s, "\\", "\\\\")
-		},
-	}
-
 	calicoKubeConfigTemplate = template.Must(template.New("Kubeconfig").Parse(`apiVersion: v1
 kind: Config
 clusters:
@@ -135,6 +129,7 @@ const (
 	calicoNode = "calico-node"
 )
 
+// GetConfig returns the CNI configuration
 func (c *Calico) GetConfig() *CNICommonConfig {
 	return &c.CNICfg.CNICommonConfig
 }
@@ -166,7 +161,7 @@ func (c *Calico) initializeConfig(ctx context.Context, nodeConfig *daemonconfig.
 
 	c.CNICfg = &CalicoConfig{
 		CNICommonConfig: CNICommonConfig{
-			Name:           "Calico",
+			Name:           "calico",
 			OverlayNetName: "Calico",
 			OverlayEncap:   "vxlan",
 			Hostname:       nodeConfig.AgentConfig.NodeName,
@@ -374,6 +369,7 @@ func (c *Calico) overrideCalicoConfigByHelm(restConfig *rest.Config) error {
 	return nil
 }
 
+// findCalicoInterface finds the interface to use for Calico based on the NodeAddressAutodetectionV4
 func findCalicoInterface(nodeV4 *opv1.NodeAddressAutodetection) (IPAutoDetectionMethod, calicoInterface string, err error) {
 	IPAutoDetectionMethod, err = nodeAddressAutodetection(*nodeV4)
 	if err != nil {
@@ -402,6 +398,7 @@ func findCalicoInterface(nodeV4 *opv1.NodeAddressAutodetection) (IPAutoDetection
 	return
 }
 
+// startConfd starts the confd service (for BGP)
 func startConfd(ctx context.Context, config *CalicoConfig, logPath string) {
 	outputFile := logging.GetLogger(filepath.Join(logPath, "confd.log"), 50)
 
@@ -424,6 +421,7 @@ func startConfd(ctx context.Context, config *CalicoConfig, logPath string) {
 	logrus.Error("Confd exited")
 }
 
+// startFelix starts the felix service
 func startFelix(ctx context.Context, config *CalicoConfig, logPath string) {
 	outputFile := logging.GetLogger(filepath.Join(logPath, "felix.log"), 50)
 
@@ -453,6 +451,7 @@ func startFelix(ctx context.Context, config *CalicoConfig, logPath string) {
 	logrus.Error("Felix exited")
 }
 
+// startCalico starts the calico service
 func startCalico(ctx context.Context, config *CalicoConfig, logPath string) error {
 	outputFile := logging.GetLogger(filepath.Join(logPath, "calico-node.log"), 50)
 
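The `replaceSlashWin` FuncMap removed from calico.go reappears in pkg/windows/utils.go further down, so the Flannel templates can share it. A small self-contained example of what its `replace` helper does when a Windows path is rendered into JSON (the path value is made up):

```go
package main

import (
	"os"
	"strings"
	"text/template"
)

// Same helper that utils.go now exposes as replaceSlashWin: it doubles
// backslashes so Windows paths stay valid inside rendered JSON.
var replaceSlashWin = template.FuncMap{
	"replace": func(s string) string {
		return strings.ReplaceAll(s, "\\", "\\\\")
	},
}

func main() {
	tmpl := template.Must(template.New("example").Funcs(replaceSlashWin).
		Parse(`{"kubeconfig": "{{ replace .KubeConfigPath }}"}` + "\n"))

	data := struct{ KubeConfigPath string }{`c:\var\lib\rancher\rke2\agent\flannel.kubeconfig`}
	// Prints: {"kubeconfig": "c:\\var\\lib\\rancher\\rke2\\agent\\flannel.kubeconfig"}
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```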
"Settings": { + "Exceptions": [ + "{{ .ClusterCIDR }}", "{{ .ServiceCIDR }}" + ] + } + } + }, { + "Name": "EndpointPolicy", + "Value": { + "Type": "SDNRoute", + "Settings": { + "DestinationPrefix": "{{ .ServiceCIDR }}", + "NeedEncap": true + } + } + }, { + "name": "EndpointPolicy", + "value": { + "Type": "ProviderAddress", + "Settings": { + "ProviderAddress": "{{ .NodeIP }}" + } + } + }] + } + } + ] + } +`)) +) + +type Flannel struct { + CNICfg *FlannelConfig + KubeClient *kubernetes.Clientset +} + +type SourceVipResponse struct { + IP4 struct { + IP string `json:"ip"` + } `json:"ip4"` +} + +const ( + FlannelConfigName = "07-flannel.conflist" + FlannelKubeConfigName = "flannel.kubeconfig" + FlanneldConfigName = "flanneld-net-conf.json" + FlannelChart = "rke2-flannel" +) + +// GetConfig returns the CNI configuration +func (f *Flannel) GetConfig() *CNICommonConfig { + return &f.CNICfg.CNICommonConfig +} + +// Setup creates the basic configuration required by the CNI. +func (f *Flannel) Setup(ctx context.Context, nodeConfig *daemonconfig.Node, restConfig *rest.Config, dataDir string) error { + + if err := f.initializeConfig(ctx, nodeConfig, restConfig, dataDir); err != nil { + return err + } + + if err := f.writeConfigFiles(); err != nil { + return err + } + + logrus.Info("Flannel required config files ready") + return nil +} + +// initializeConfig sets the default configuration in CNIConfig +func (f *Flannel) initializeConfig(ctx context.Context, nodeConfig *daemonconfig.Node, restConfig *rest.Config, dataDir string) error { + var err error + + f.CNICfg = &FlannelConfig{ + CNICommonConfig: CNICommonConfig{ + Name: "flannel", + OverlayNetName: "flannel.4096", + Hostname: nodeConfig.AgentConfig.NodeName, + ConfigPath: filepath.Join("c:\\", dataDir, "agent"), + OverlayEncap: "vxlan", + VxlanVNI: "4096", + VxlanPort: "4789", + ServiceCIDR: nodeConfig.AgentConfig.ServiceCIDR.String(), + ClusterCIDR: nodeConfig.AgentConfig.ClusterCIDR.String(), + CNIConfDir: nodeConfig.AgentConfig.CNIConfDir, + NodeIP: nodeConfig.AgentConfig.NodeIP, + CNIBinDir: nodeConfig.AgentConfig.CNIBinDir, + CNIVersion: "1.0.0", + Interface: nodeConfig.AgentConfig.NodeIP, + }, + } + + f.CNICfg.KubeConfig, f.KubeClient, err = f.createKubeConfigAndClient(ctx, restConfig) + if err != nil { + return err + } + + logrus.Debugf("Flannel Config: %+v", f.CNICfg) + + return nil +} + +// writeConfigFiles writes the three required files by Flannel +func (f *Flannel) writeConfigFiles() error { + + // Create flannelKubeConfig + if err := f.renderFlannelConfig(filepath.Join(f.CNICfg.ConfigPath, FlannelKubeConfigName), flannelKubeConfigTemplate); err != nil { + return err + } + + // Create flanneld config + if err := f.renderFlannelConfig(filepath.Join(f.CNICfg.ConfigPath, FlanneldConfigName), flanneldConfigTemplate); err != nil { + return err + } + + // Create flannel CNI conflist + if err := f.renderFlannelConfig(filepath.Join(f.CNICfg.CNIConfDir, FlannelConfigName), flannelCniConflistTemplate); err != nil { + return err + } + + return nil +} + +// renderFlannelConfig creates the file and then renders the template using Flannel Config parameters +func (f *Flannel) renderFlannelConfig(path string, toRender *template.Template) error { + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + output, err := os.Create(path) + if err != nil { + return err + } + + defer output.Close() + toRender.Execute(output, f.CNICfg) + + return nil +} + +// createKubeConfig creates all needed for Flannel to contact kube-api +func (f 
+func (f *Flannel) createKubeConfigAndClient(ctx context.Context, restConfig *rest.Config) (*KubeConfig, *kubernetes.Clientset, error) {
+
+	// Fill all information except for the token
+	flannelKubeConfig := KubeConfig{
+		Server:               "https://127.0.0.1:6443",
+		CertificateAuthority: filepath.Join(f.CNICfg.ConfigPath, "server-ca.crt"),
+	}
+
+	// Generate the token request
+	req := authv1.TokenRequest{
+		Spec: authv1.TokenRequestSpec{
+			Audiences:         []string{version.Program},
+			ExpirationSeconds: pointer.Int64(60 * 60 * 24 * 365),
+		},
+	}
+
+	// Register the token in the Flannel service account
+	client, err := kubernetes.NewForConfig(restConfig)
+	if err != nil {
+		return nil, nil, err
+	}
+	serviceAccounts := client.CoreV1().ServiceAccounts("kube-system")
+	token, err := serviceAccounts.CreateToken(ctx, "flannel", &req, metav1.CreateOptions{})
+	if err != nil {
+		return nil, nil, errors.Wrapf(err, "failed to create token for service account (kube-system/flannel)")
+	}
+
+	flannelKubeConfig.Token = token.Status.Token
+
+	return &flannelKubeConfig, client, nil
+}
+
+// Start waits for the node to be ready and starts flannel
+func (f *Flannel) Start(ctx context.Context) error {
+	logPath := filepath.Join(f.CNICfg.ConfigPath, "logs", "flanneld.log")
+
+	// Wait for the node to be registered in the cluster
+	wait.PollImmediateWithContext(ctx, 3*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) {
+		_, err := f.KubeClient.CoreV1().Nodes().Get(ctx, f.CNICfg.Hostname, metav1.GetOptions{})
+		if err != nil {
+			logrus.WithError(err).Warningf("Flanneld can't start because it can't find node, retrying %s", f.CNICfg.Hostname)
+			return false, nil
+		} else {
+			logrus.Infof("Node %s registered. Flanneld can start", f.CNICfg.Hostname)
+			return true, nil
+		}
+	})
+
+	go startFlannel(ctx, f.CNICfg, logPath)
+
+	return nil
+}
+
+// startFlannel calls the flanneld binary with the correct config parameters and envs
+func startFlannel(ctx context.Context, config *FlannelConfig, logPath string) {
+	outputFile := logging.GetLogger(logPath, 50)
+
+	specificEnvs := []string{
+		fmt.Sprintf("NODE_NAME=%s", config.Hostname),
+		fmt.Sprintf("PATH=%s", os.Getenv("PATH")),
+	}
+
+	args := []string{
+		fmt.Sprintf("--kubeconfig-file=%s", filepath.Join(config.ConfigPath, FlannelKubeConfigName)),
+		"--ip-masq",
+		"--kube-subnet-mgr",
+		"--iptables-forward-rules=false",
+		fmt.Sprintf("--iface=%s", config.Interface),
+		fmt.Sprintf("--net-config-path=%s", filepath.Join(config.ConfigPath, FlanneldConfigName)),
+	}
+
+	logrus.Infof("Flanneld Envs: %s and args: %v", specificEnvs, args)
+	cmd := exec.CommandContext(ctx, "flanneld.exe", args...)
+	cmd.Env = append(specificEnvs)
+	cmd.Stdout = outputFile
+	cmd.Stderr = outputFile
+	if err := cmd.Run(); err != nil {
+		logrus.Errorf("Flanneld has an error: %v. Check %s for extra information", err, logPath)
+	}
+	logrus.Error("Flanneld exited")
+}
+
+// ReserveSourceVip reserves an IP that will be used as source VIP by kube-proxy. It uses host-local CNI plugin to reserve the IP
+func (f *Flannel) ReserveSourceVip(ctx context.Context) (string, error) {
+	var network *hcsshim.HNSNetwork
+	var err error
+
+	logrus.Info("Reserving an IP on flannel HNS network for kube-proxy source vip")
+	wait.PollImmediateWithContext(ctx, 10*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) {
+		network, err = hcsshim.GetHNSNetworkByName(f.CNICfg.OverlayNetName)
+		if err != nil || network == nil {
+			logrus.Debugf("can't find flannel HNS network, retrying %s", f.CNICfg.OverlayNetName)
+			return false, nil
+		}
+
+		if network.ManagementIP == "" {
+			logrus.Debugf("wait for flannel HNS network management IP, retrying %s", f.CNICfg.OverlayNetName)
+			return false, nil
+		}
+
+		if network.ManagementIP != "" {
+			logrus.Infof("Flannel HNS network ready with managementIP: %s", network.ManagementIP)
+			return true, nil
+		}
+		return false, nil
+	})
+
+	subnet := network.Subnets[0].AddressPrefix
+
+	logrus.Debugf("host-local will use the following subnet: %v to reserve the sourceIP", subnet)
+
+	configData := `{
+		"cniVersion": "0.2.0",
+		"name": "vxlan0",
+		"ipam": {
+			"type": "host-local",
+			"ranges": [[{"subnet":"` + subnet + `"}]],
+			"dataDir": "/var/lib/cni/networks"
+		}
+	}`
+
+	cmd := exec.Command("host-local.exe")
+	cmd.Env = append(os.Environ(),
+		"CNI_COMMAND=ADD",
+		"CNI_CONTAINERID=dummy",
+		"CNI_NETNS=dummy",
+		"CNI_IFNAME=dummy",
+		"CNI_PATH="+f.CNICfg.CNIBinDir,
+	)
+
+	cmd.Stdin = strings.NewReader(configData)
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		logrus.WithError(err).Warning("Failed to execute host-local.exe")
+		logrus.Errorf("This is the output: %v", strings.TrimSpace(string(out)))
+		return "", err
+	}
+
+	var sourceVipResp SourceVipResponse
+	err = json.Unmarshal(out, &sourceVipResp)
+	if err != nil {
+		logrus.WithError(err).Warning("Failed to unmarshal sourceVip response")
+		logrus.Infof("This is the error: %v", err)
+		return "", err
+	}
+
+	return strings.TrimSpace(strings.Split(sourceVipResp.IP4.IP, "/")[0]), nil
+}
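`ReserveSourceVip` above shells out to `host-local.exe` with `CNI_COMMAND=ADD` and keeps only the `ip4.ip` field of the result. A self-contained sketch of that parsing step, with an illustrative CNI 0.2.0 payload standing in for real `host-local` output:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// sourceVipResponse mirrors the SourceVipResponse type above: only ip4.ip is needed.
type sourceVipResponse struct {
	IP4 struct {
		IP string `json:"ip"`
	} `json:"ip4"`
}

func main() {
	// Illustrative host-local output; the real subnet comes from the
	// flannel.4096 HNS network on the node.
	out := []byte(`{"cniVersion":"0.2.0","ip4":{"ip":"10.42.1.2/24"},"dns":{}}`)

	var resp sourceVipResponse
	if err := json.Unmarshal(out, &resp); err != nil {
		panic(err)
	}
	// Strip the prefix length to get the source VIP handed to kube-proxy.
	vip := strings.TrimSpace(strings.Split(resp.IP4.IP, "/")[0])
	fmt.Println(vip) // 10.42.1.2
}
```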
diff --git a/pkg/windows/types.go b/pkg/windows/types.go
index c2380b2ccb..803165168e 100644
--- a/pkg/windows/types.go
+++ b/pkg/windows/types.go
@@ -26,26 +26,26 @@ type KubeConfig struct {
 }
 
 type CNICommonConfig struct {
-	Name           string
-	OverlayNetName string
-	OverlayEncap   string
-	Hostname       string
-	ConfigPath     string
-	CNIConfDir     string
-	CNIBinDir      string
-	ClusterCIDR    string
-	ServiceCIDR    string
-	NodeIP         string
-	VxlanVNI       string
-	VxlanPort      string
-	Interface      string
-	IpamType       string
-	CNIVersion     string
-	KubeConfig     *KubeConfig
+	Name           string
+	OverlayNetName string
+	OverlayEncap   string
+	Hostname       string
+	ConfigPath     string
+	CNIConfDir     string
+	CNIBinDir      string
+	ClusterCIDR    string
+	ServiceCIDR    string
+	NodeIP         string
+	VxlanVNI       string
+	VxlanPort      string
+	Interface      string
+	IpamType       string
+	CNIVersion     string
+	KubeConfig     *KubeConfig
 }
 
 type CalicoConfig struct {
-	CNICommonConfig // embedded struct
+	CNICommonConfig // embedded struct
 	KubeNetwork     string
 	DNSServers      string
 	DNSSearch       string
@@ -59,6 +59,10 @@ type CalicoConfig struct {
 	ETCDCaCertFile string
 }
 
+type FlannelConfig struct {
+	CNICommonConfig // embedded struct
+}
+
 // Stub of Calico configuration used to extract user-provided overrides
 // Based off of https://github.com/tigera/operator/blob/master/api/v1/installation_types.go
 type CalicoInstallation struct {
diff --git a/pkg/windows/utils.go b/pkg/windows/utils.go
index 6a6ed2e3ea..90dd475146 100644
--- a/pkg/windows/utils.go
+++ b/pkg/windows/utils.go
@@ -11,6 +11,7 @@ import (
 	"net/url"
 	"regexp"
 	"strings"
+	"text/template"
 	"time"
 
 	"github.com/Microsoft/hcsshim"
@@ -22,6 +23,14 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 )
 
+var (
+	replaceSlashWin = template.FuncMap{
+		"replace": func(s string) string {
+			return strings.ReplaceAll(s, "\\", "\\\\")
+		},
+	}
+)
+
 // createHnsNetwork creates the network that will connect nodes and returns its managementIP
 func createHnsNetwork(backend string, networkAdapter string) (string, error) {
 	var network hcsshim.HNSNetwork
diff --git a/scripts/build-images b/scripts/build-images
index 7efce12176..d4a498ed35 100755
--- a/scripts/build-images
+++ b/scripts/build-images
@@ -96,6 +96,11 @@ xargs -n1 -t docker image pull --quiet << EOF > build/images-harvester.txt
     ${REGISTRY}/rancher/mirrored-longhornio-csi-provisioner:v2.1.2
     ${REGISTRY}/rancher/mirrored-longhornio-csi-attacher:v3.2.1
 EOF
+
+xargs -n1 -t docker image pull --quiet << EOF > build/images-flannel.txt
+    ${REGISTRY}/rancher/hardened-flannel:v0.24.2-build20240122
+    ${REGISTRY}/rancher/hardened-cni-plugins:v1.4.0-build20240122
+EOF
 fi
 # Continue to provide a legacy airgap archive set with the default CNI images
 cat build/images-core.txt build/images-canal.txt > build/images.txt
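For reference, rendering the `flanneldConfigTemplate` from pkg/windows/flannel.go with typical rke2 defaults (cluster CIDR 10.42.0.0/16, VNI 4096, port 4789) produces the net-conf that `startFlannel` passes to flanneld via `--net-config-path`. The standalone program and the values are illustrative only; the FuncMap is omitted because this template does not use it:

```go
package main

import (
	"os"
	"text/template"
)

// Same net-conf template that flannel.go writes to flanneld-net-conf.json.
var flanneldConfigTemplate = template.Must(template.New("FlanneldConfig").Parse(`{
"Network": "{{ .ClusterCIDR }}",
"Backend": {
  "Type": "{{ .OverlayEncap }}",
  "VNI": {{ .VxlanVNI }},
  "Port": {{ .VxlanPort }}
  }
}`))

func main() {
	cfg := struct {
		ClusterCIDR, OverlayEncap, VxlanVNI, VxlanPort string
	}{"10.42.0.0/16", "vxlan", "4096", "4789"}

	if err := flanneldConfigTemplate.Execute(os.Stdout, cfg); err != nil {
		panic(err)
	}
}
```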