diff --git a/cmd/seeder/README.md b/cmd/seeder/README.md new file mode 100644 index 0000000..1a6b803 --- /dev/null +++ b/cmd/seeder/README.md @@ -0,0 +1,55 @@ +# seeder-cli +seeder plugin allows automation of some routine tasks with seeder. +It can be used as a kubectl plugin by placing it in your path, and renaming the binary as kubectl-seeder or a standalone binary + +Currently supported sub commands are: +* gen-kubeconfig: will generate an admin kubeconfig for a harvester cluster provisioned via seeder +* create-cluster: will create a new cluster object with some basic options +* recreate-cluster: will delete and re-create the cluster and patch the version if one is supplied + +## gen-kubeconfig +```shell +Usage: + seeder gen-kubeconfig $CLUSTER_NAME [flags] + +Flags: + -h, --help help for gen-kubeconfig + -p, --path string path to place generated harvester cluster kubeconfig + +Global Flags: + -d, --debug enable debug logging + -n, --namespace string namespace +``` + +## create-cluster +```shell +Usage: + seeder create-cluster $CLUSTER_NAME [options] [flags] + +Flags: + --address-pool string addresspool to be used for address allocation for VIP and inventory nodes + --config-url string [optional] location of common harvester config that will be applied to all nodes + -h, --help help for create-cluster + --image-url string [optional] location where artifacts for pxe booting inventory are present + --inventory strings list of inventory objects in namespace to be used for cluster + --static-vip string [optional] static address for harvester cluster vip (optional). 
If not specified an address from addresspool will be used + -v, --version string version of harvester + +Global Flags: + -d, --debug enable debug logging + -n, --namespace string namespace +``` + +## recreate-cluster +```shell +Usage: + seeder recreate-cluster $CLUSTER_NAME [flags] + +Flags: + -h, --help help for recreate-cluster + -v, --version string [optional] version to use to recreate cluster + +Global Flags: + -d, --debug enable debug logging + -n, --namespace string namespace +``` \ No newline at end of file diff --git a/cmd/seeder/main.go b/cmd/seeder/main.go new file mode 100644 index 0000000..9622ae7 --- /dev/null +++ b/cmd/seeder/main.go @@ -0,0 +1,10 @@ +package main + +import ( + "github.com/harvester/seeder/cmd/seeder/pkg/plugin" + command "github.com/rancher/wrangler-cli" +) + +func main() { + command.Main(plugin.New()) +} diff --git a/cmd/seeder/pkg/plugin/createcluster.go b/cmd/seeder/pkg/plugin/createcluster.go new file mode 100644 index 0000000..0420699 --- /dev/null +++ b/cmd/seeder/pkg/plugin/createcluster.go @@ -0,0 +1,232 @@ +package plugin + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + seederv1alpha1 "github.com/harvester/seeder/pkg/api/v1alpha1" + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + + command "github.com/rancher/wrangler-cli" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +type CreateCluster struct { + Version string `usage:"version of harvester" short:"v"` + Inventory []string `usage:"list of inventory objects in namespace to be used for cluster"` + AddressPool string `usage:"addresspool to be used for address allocation for VIP and inventory nodes"` + StaticVIP string `usage:"[optional] static address for harvester cluster vip (optional). 
If not specified an address from addresspool will be used"` + ConfigURL string `usage:"[optional] location of common harvester config that will be applied to all nodes"` + ImageURL string `usage:"[optional] location where artifacts for pxe booting inventory are present"` +} + +var ( + clusterName string + createClusterPreflightError = errors.New("pre-flight errors detected") +) + +func NewCreateCluster() *cobra.Command { + cc := command.Command(&CreateCluster{}, cobra.Command{ + Short: "create cluster", + Long: `create-cluster will create a new cluster.metal.harvesterhci.io object from the flags provided. +It acts as a simple wrapper around the yaml based cluster definition, and aims to be a quick start for provisioning clusters. +For more advanced use cases where additional options need to be provided, please use the yaml based cluster definition method`, + Use: "create-cluster $CLUSTER_NAME [options]", + Args: cobra.ExactArgs(1), + }) + return cc +} + +func (c *CreateCluster) Run(cmd *cobra.Command, args []string) error { + logrus.Debug(args) + err := c.preflightchecks(cmd, args) + if err != nil { + return err + } + cmd.Println(genHeaderMessage(fmt.Sprintf("creating new cluster %s", clusterName))) + err = c.createCluster(cmd) + if err != nil { + return err + } + + cmd.Println(genHeaderMessage(fmt.Sprintf("cluster %s created", clusterName))) + return nil +} + +// Pre-Run will check if flags are set +func (c *CreateCluster) Pre(cmd *cobra.Command, args []string) error { + // check flags are set + var err error + requiredFlags := []string{"address-pool", "inventory", "version"} + for _, rf := range requiredFlags { + if flagErr := cmd.MarkFlagRequired(rf); flagErr != nil { + err = errors.Wrap(err, flagErr.Error()) + } + } + + return err +} + +func (c *CreateCluster) preflightchecks(cmd *cobra.Command, args []string) error { + type preFlightFuncs func(*cobra.Command) (bool, error) + cmd.Println(genHeaderMessage("running pre-flight checks for create-cluster")) + 
clusterName = args[0] + checkList := []preFlightFuncs{ + c.inventoryExists, + c.addressPoolExists, + c.clusterExists, + } + + var preFlightFailures bool + for _, v := range checkList { + ok, err := v(cmd) + if err != nil { + return err + } + preFlightFailures = preFlightFailures || ok + } + + if preFlightFailures { + cmd.PrintErrln(genFailMessage("one or more pre-flight checks failed")) + return createClusterPreflightError + } + + return nil +} + +func (c *CreateCluster) inventoryExists(cmd *cobra.Command) (bool, error) { + var preCheckFailed bool + for _, i := range c.Inventory { + invObj := &seederv1alpha1.Inventory{} + err := mgmtClient.Get(cmd.Context(), types.NamespacedName{Namespace: namespace, Name: i}, invObj) + if err != nil { + if apierrors.IsNotFound(err) { + preCheckFailed = true + cmd.Println(genFailMessage(fmt.Sprintf("πŸ–₯ unable to find inventory %s in namespace %s", i, namespace))) + continue + } else { + return false, err + } + } + + if invObj.Status.Cluster.Name != "" { + preCheckFailed = true + cmd.Println(genFailMessage(fmt.Sprintf("πŸ–₯ already allocated to cluster %s in namespace %s", invObj.Status.Cluster.Name, + namespace))) + continue + } + + if invObj.Status.Status != seederv1alpha1.InventoryReady { + preCheckFailed = true + cmd.Println(genFailMessage(fmt.Sprintf("πŸ–₯ inventory %s in namespace %s is not ready for allocation", i, + namespace))) + continue + } + + cmd.Println(genPassMessage(fmt.Sprintf("πŸ–₯ inventory %s in namespace %s is ready", i, + namespace))) + + } + + return preCheckFailed, nil +} + +func (c *CreateCluster) addressPoolExists(cmd *cobra.Command) (bool, error) { + addObj := &seederv1alpha1.AddressPool{} + err := mgmtClient.Get(cmd.Context(), types.NamespacedName{Namespace: namespace, Name: c.AddressPool}, addObj) + if err != nil { + if apierrors.IsNotFound(err) { + cmd.Println(genFailMessage(fmt.Sprintf("πŸ–₯ unable to find addresspool %s in namespace %s", c.AddressPool, namespace))) + return true, nil + } else { + 
return false, err + } + } + + if addObj.Status.Status != seederv1alpha1.PoolReady { + cmd.Println(genFailMessage(fmt.Sprintf("πŸ–₯ addresspool %s in namespace %s is not ready", c.AddressPool, namespace))) + return true, nil + } + + cmd.Println(genPassMessage(fmt.Sprintf("πŸ–₯ addresspool %s in namespace %s is ready", c.AddressPool, namespace))) + + return false, nil +} + +func (c *CreateCluster) clusterExists(cmd *cobra.Command) (bool, error) { + clusterObj := &seederv1alpha1.Cluster{} + err := mgmtClient.Get(cmd.Context(), types.NamespacedName{Namespace: namespace, Name: clusterName}, clusterObj) + if err != nil { + if apierrors.IsNotFound(err) { + cmd.Println(genPassMessage(fmt.Sprintf("πŸ–₯ no cluster %s exists in namespace %s", clusterName, namespace))) + return false, nil + } else { + return false, err + } + } + cmd.Println(genFailMessage(fmt.Sprintf("πŸ–₯ cluster %s already exists in namespace %s", clusterName, namespace))) + return true, nil +} + +func (c *CreateCluster) generateCluster() *seederv1alpha1.Cluster { + cluster := &seederv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: namespace, + }, + Spec: seederv1alpha1.ClusterSpec{ + HarvesterVersion: c.Version, + ClusterConfig: seederv1alpha1.ClusterConfig{ + ConfigURL: c.ConfigURL, + }, + }, + } + + if c.ImageURL != "" { + cluster.Spec.ImageURL = c.ImageURL + } + + var nodes []seederv1alpha1.NodeConfig + for _, v := range c.Inventory { + nodes = append(nodes, seederv1alpha1.NodeConfig{ + InventoryReference: seederv1alpha1.ObjectReference{ + Name: v, + Namespace: namespace, + }, + AddressPoolReference: seederv1alpha1.ObjectReference{ + Name: c.AddressPool, + Namespace: namespace, + }, + }) + } + + vipConfig := seederv1alpha1.VIPConfig{ + AddressPoolReference: seederv1alpha1.ObjectReference{ + Name: c.AddressPool, + Namespace: namespace, + }, + } + + if c.StaticVIP != "" { + vipConfig.StaticAddress = c.StaticVIP + } + + cluster.Spec.Nodes = nodes + cluster.Spec.VIPConfig 
= vipConfig + return cluster +} + +func (c *CreateCluster) createCluster(cmd *cobra.Command) error { + + cluster := c.generateCluster() + err := mgmtClient.Create(cmd.Context(), cluster) + if err != nil { + return err + } + cmd.Println(genPassMessage(fmt.Sprintf("cluster %s submitted successfully", clusterName))) + return nil +} diff --git a/cmd/seeder/pkg/plugin/createcluster_test.go b/cmd/seeder/pkg/plugin/createcluster_test.go new file mode 100644 index 0000000..7cb31ee --- /dev/null +++ b/cmd/seeder/pkg/plugin/createcluster_test.go @@ -0,0 +1,65 @@ +package plugin + +import ( + "testing" + + seederv1alpha1 "github.com/harvester/seeder/pkg/api/v1alpha1" + "k8s.io/apimachinery/pkg/types" + + "github.com/harvester/seeder/pkg/mock" + "github.com/stretchr/testify/require" + + "github.com/spf13/cobra" +) + +func Test_CommandCreateClusterPass(t *testing.T) { + var err error + cmd := &cobra.Command{} + inv := []string{"inventory-1"} + addPool := "mock-pool" + namespace = "default" + imageURL := "http://localhost/iso" + + c := &CreateCluster{ + Version: "v1.0.3", + Inventory: inv, + AddressPool: addPool, + ImageURL: imageURL, + } + assert := require.New(t) + mgmtClient, err = mock.GenerateFakeClient() + assert.NoError(err, "expected no error during generation of mock client") + err = c.preflightchecks(cmd, []string{"mock-cluster"}) + assert.NoError(err, "expected no error during preflightchecks") + + err = c.createCluster(cmd) + assert.NoError(err, "expected no error during cluster creation") + clusterObj := &seederv1alpha1.Cluster{} + err = mgmtClient.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: namespace}, clusterObj) + assert.NoError(err, "expect no error looking up cluster") + assert.Equal(addPool, clusterObj.Spec.VIPConfig.AddressPoolReference.Name, "expected vip addresspools to match") + assert.Len(clusterObj.Spec.Nodes, 1, "expected to find one node") + assert.Equal(addPool, clusterObj.Spec.Nodes[0].AddressPoolReference.Name, "expected node address 
pools to match") +} + +func Test_CommandCreateClusterMissingInventory(t *testing.T) { + var err error + cmd := &cobra.Command{} + inv := []string{"inventory-3"} + addPool := "mock-pool" + namespace = "default" + imageURL := "http://localhost/iso" + + c := &CreateCluster{ + Version: "v1.0.3", + Inventory: inv, + AddressPool: addPool, + ImageURL: imageURL, + } + assert := require.New(t) + mgmtClient, err = mock.GenerateFakeClient() + assert.NoError(err, "expected no error during generation of mock client") + err = c.preflightchecks(cmd, []string{"mock-cluster"}) + assert.Error(err, "expected no error during preflightchecks") + assert.ErrorIs(err, createClusterPreflightError) +} diff --git a/cmd/seeder/pkg/plugin/genkubeconfig.go b/cmd/seeder/pkg/plugin/genkubeconfig.go new file mode 100644 index 0000000..5c21f2e --- /dev/null +++ b/cmd/seeder/pkg/plugin/genkubeconfig.go @@ -0,0 +1,147 @@ +package plugin + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/mitchellh/go-homedir" + + "github.com/harvester/seeder/pkg/util" + + "k8s.io/apimachinery/pkg/runtime" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + + "k8s.io/apimachinery/pkg/types" + + "sigs.k8s.io/controller-runtime/pkg/client" + + seederv1alpha1 "github.com/harvester/seeder/pkg/api/v1alpha1" + + command "github.com/rancher/wrangler-cli" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +var ( + runningClusters, notRunningClusters, missingClusters []string + mgmtClient client.Client + ctx context.Context + scheme = runtime.NewScheme() + namespace string + runningClusterObjs []seederv1alpha1.Cluster +) + +func NewGenKubeConfig() *cobra.Command { + gkc := command.Command(&GenKubeconfig{}, cobra.Command{ + Short: "generate kubeconfig", + Long: `gen-kubeconfig will leverage the kubeconfig for a seeder cluster and generate a new kubeconfig for the +target Harvester cluster being provisioned and managed via seeder. 
+The kubeconfig will be placed in $HOME/.kube unless an alternate path is specified via --path flag. +The name of the generated config file will be same as the name of the cluster.metal.harvesterhci.io object`, + Use: "gen-kubeconfig $CLUSTER_NAME", + Args: cobra.MinimumNArgs(1), + }) + return gkc +} + +type GenKubeconfig struct { + Path string `usage:"path to place generated harvester cluster kubeconfig" short:"p"` +} + +func (g *GenKubeconfig) Run(cmd *cobra.Command, args []string) error { + ctx = cmd.Context() + logrus.Debugf("args passsed: %v", args) + err := g.preflightchecks(cmd, args) + if err != nil { + return err + } + return g.generateKubeConfig(cmd) +} + +// preflightchecks will check if the seeder provisioned cluster is in correct state +// and has appropriate information available before attemping kubeconfig generation +func (g *GenKubeconfig) preflightchecks(cmd *cobra.Command, args []string) error { + for _, v := range args { + cluster := &seederv1alpha1.Cluster{} + err := mgmtClient.Get(ctx, types.NamespacedName{Name: v, Namespace: namespace}, cluster) + if err != nil { + if apierrors.IsNotFound(err) { + missingClusters = append(missingClusters, v) + continue + } else { + return fmt.Errorf("error looking up cluster %s: %v", v, err) + } + } + + if cluster.Status.Status == seederv1alpha1.ClusterRunning { + runningClusters = append(runningClusters, v) + runningClusterObjs = append(runningClusterObjs, *cluster) + } else { + notRunningClusters = append(notRunningClusters, v) + } + } + + cmd.Println(genHeaderMessage("running pre-flight checks for gen-kubeconfig")) + for _, v := range runningClusters { + cmd.Println(genPassMessage(fmt.Sprintf("cluster %s is running", v))) + } + + for _, v := range missingClusters { + cmd.Println(genFailMessage(fmt.Sprintf("cluster %s not found", v))) + } + + for _, v := range notRunningClusters { + cmd.Println(genFailMessage(fmt.Sprintf("cluster %s not running", v))) + } + + return nil + +} + +func (g *GenKubeconfig) 
generateKubeConfig(cmd *cobra.Command) error { + currentLogLevel := logrus.GetLevel() + defer logrus.SetLevel(currentLogLevel) + // change log levels temporarily to suppress in build messages from dynamic listener + // https://github.com/rancher/dynamiclistener/blob/v0.3.3/cert/cert.go#L138 + logrus.SetLevel(logrus.ErrorLevel) + + if len(runningClusterObjs) == 0 { + cmd.Println(genHeaderMessage("no running clusters specified. no action needed.")) + return nil + } + + // identify path to write files to + path := g.Path + if g.Path == "" { + home, err := homedir.Dir() + if err != nil { + return fmt.Errorf("error evaluating home dir: %v", err) + } + path = filepath.Join(home, ".kube") + } + + cmd.Println(genHeaderMessage("generating kubeconfig for running clusters")) + for _, v := range runningClusterObjs { + port, ok := v.Labels[seederv1alpha1.OverrideAPIPortLabel] + if !ok { + port = seederv1alpha1.DefaultAPIPort + } + kcBytes, err := util.GenerateKubeConfig(v.Status.ClusterAddress, port, seederv1alpha1.DefaultAPIPrefix, + v.Status.ClusterToken) + if err != nil { + return fmt.Errorf("error generating kubeconfig for cluster %s: %v", v.Name, err) + } + + fileName := filepath.Join(path, fmt.Sprintf("%s-%s.yaml", v.Name, namespace)) + err = os.WriteFile(fileName, kcBytes, 0600) + if err != nil { + return fmt.Errorf("error writing kubeconfig for %s: %v", v.Name, err) + } + + cmd.Println(genPassMessage(fmt.Sprintf("kubeconfig %s-%s.yaml written at %s", v.Name, namespace, path))) + } + return nil +} diff --git a/cmd/seeder/pkg/plugin/genkubeconfig_test.go b/cmd/seeder/pkg/plugin/genkubeconfig_test.go new file mode 100644 index 0000000..82a614f --- /dev/null +++ b/cmd/seeder/pkg/plugin/genkubeconfig_test.go @@ -0,0 +1,127 @@ +package plugin + +import ( + "context" + "fmt" + "log" + "net/url" + "os" + "path/filepath" + "testing" + "time" + + "github.com/harvester/seeder/pkg/mock" + "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" + 
"k8s.io/apimachinery/pkg/types" + + seederv1alpha1 "github.com/harvester/seeder/pkg/api/v1alpha1" + + "github.com/ory/dockertest/v3/docker/pkg/ioutils" + + "github.com/spf13/cobra" + + "github.com/stretchr/testify/require" +) + +var ( + port string + docker_host string +) + +const ( + token = "token" +) + +func Test_CommandGenerateKubeConfig(t *testing.T) { + ctx = context.TODO() + assert := require.New(t) + + // setup a k3s server in docker using dockertest + pool, err := dockertest.NewPool("") + assert.NoError(err, "expected no error during setup of docker pool") + + runOpts := &dockertest.RunOptions{ + Name: "k3s-mock", + Repository: "rancher/k3s", + Tag: "v1.24.2-k3s1", + Cmd: []string{"server", "--cluster-init"}, + Env: []string{ + fmt.Sprintf("K3S_TOKEN=%s", token), + }, + Mounts: []string{ + "tmpfs:/run", + "tmpfs:/var/run", + }, + Privileged: true, + ExposedPorts: []string{ + "6443/tcp", + }, + } + + k3s, err := pool.RunWithOptions(runOpts, func(config *docker.HostConfig) { + // set AutoRemove to true so that stopped container goes away by itself + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{ + Name: "no", + } + }) + + assert.NoError(err, "expect no error creating k3s container") + + time.Sleep(60 * time.Second) + port = k3s.GetPort("6443/tcp") + dockerURL := os.Getenv("DOCKER_HOST") + if dockerURL != "" { + u, err := url.Parse(dockerURL) + if err != nil { + log.Fatalf("error parsing DOCKER_HOST: %v", err) + } + docker_host = u.Hostname() + } + + // setup docker_host if remote docker daemon is being used + + mgmtClient, err = mock.GenerateFakeClient() + assert.NoError(err, "expected no error during creation of mock client") + + cmd := &cobra.Command{} + args := []string{"test-mock-cluster-not-running", "test-mock-cluster-running", "test-mock-missing-cluster"} + tmpDir, err := ioutils.TempDir("/tmp", "gen-kubeconfig") + assert.NoError(err, "expected no error during creation of tmpDir") + namespace = "default" // all mock objects 
are created in mock namespace + seederv1alpha1.DefaultAPIPrefix = "k3s" //override since we are using k3s to mock + // patch port on clusters using annotation to allow kubeconfig to be extracted + for _, v := range []string{"test-mock-cluster-not-running", "test-mock-cluster-running"} { + cObj := &seederv1alpha1.Cluster{} + err = mgmtClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: v}, cObj) + assert.NoError(err, "expected no error while fetch mock clusters") + if cObj.Labels == nil { + cObj.Labels = make(map[string]string) + } + cObj.Labels[seederv1alpha1.OverrideAPIPortLabel] = port + if docker_host != "" { + cObj.Status.ClusterAddress = docker_host + } + cObj.Status.ClusterToken = token // update clusters token + err = mgmtClient.Update(ctx, cObj) + assert.NoError(err, "expected no error while patching cluster objects") + } + // define empty GenKubeConfig + g := &GenKubeconfig{ + Path: tmpDir, + } + err = g.preflightchecks(cmd, args) + assert.NoError(err, "expected no error during pre-flight checks") + + err = g.generateKubeConfig(cmd) + assert.NoError(err, "expected no error during kubeconfig generation") + + // check kubeconfig exists + _, err = os.Stat(filepath.Join(tmpDir, "test-mock-cluster-running-default.yaml")) + assert.NoError(err, "expect to find file test-mock-cluster-running-default.yaml") + err = os.RemoveAll(tmpDir) + assert.NoError(err, "expect no error during clean up of tmp dir") + + pool.Purge(k3s) +} diff --git a/cmd/seeder/pkg/plugin/logging.go b/cmd/seeder/pkg/plugin/logging.go new file mode 100644 index 0000000..6c47df9 --- /dev/null +++ b/cmd/seeder/pkg/plugin/logging.go @@ -0,0 +1,19 @@ +package plugin + +import "fmt" + +func genHeaderMessage(message string) string { + return fmt.Sprintf("\n🚜%s 🚜", message) +} + +func genPassMessage(message string) string { + return fmt.Sprintf("βœ” %s", message) +} + +func genFailMessage(message string) string { + return fmt.Sprintf("❌ %s", message) +} + +func genErrorMessage(err error) 
string { + return fmt.Sprintf("β›”execution stopped: %s", err.Error()) +} diff --git a/cmd/seeder/pkg/plugin/plugin.go b/cmd/seeder/pkg/plugin/plugin.go new file mode 100644 index 0000000..3d5cb22 --- /dev/null +++ b/cmd/seeder/pkg/plugin/plugin.go @@ -0,0 +1,77 @@ +package plugin + +import ( + "fmt" + + seederv1alpha1 "github.com/harvester/seeder/pkg/api/v1alpha1" + cli "github.com/rancher/wrangler-cli" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "k8s.io/client-go/tools/clientcmd" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func New() *cobra.Command { + root := cli.Command(&Plugin{}, cobra.Command{ + Short: "seeder plugin to interact with existing seeder installation bases", + Long: `seeder plugin allows automation of some routine tasks with seeder. +It can be used as a kubectl plugin by placing it in your path, and renaming the binary as kubectl-seeder or a standalone binary +Currently supported sub commands are: +* gen-kubeconfig: will generate an admin kubeconfig for a harvester cluster provisioned via seeder +* create-cluster: will create a new cluster object with some basic options +* recreate-cluster: will delete and re-create the cluster and patch the version if one is supplied`, + Use: "seeder -h", + }) + root.AddCommand( + NewGenKubeConfig(), + NewCreateCluster(), + NewRecreateCluster(), + ) + return root +} + +type Plugin struct { + Debug bool `usage:"enable debug logging" short:"d"` + Namespace string `usage:"namespace" short:"n"` +} + +func (p *Plugin) Run(cmd *cobra.Command, args []string) error { + return fmt.Errorf("please provide a valid sub-command") +} + +func (p *Plugin) PersistentPre(cmd *cobra.Command, args []string) error { + // enable debug log level at global level + if p.Debug { + logrus.SetLevel(logrus.DebugLevel) + logrus.Debug("debug level enabled") + } + + // setup k8s client for use with child-commands + err := seederv1alpha1.AddToScheme(scheme) + if err != nil { + return fmt.Errorf("error adding seeder schema: 
%v", err) + } + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + configOverrides := &clientcmd.ConfigOverrides{} + kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) + config, err := kubeConfig.ClientConfig() + + mgmtClient, err = client.New(config, client.Options{ + Scheme: scheme, + }) + if err != nil { + return fmt.Errorf("error generating runtime client: %v", err) + } + + namespace = p.Namespace + + // if no namespace is specified identify an override if applicable from kubeconfig loading + if p.Namespace == "" { + namespace, _, err = kubeConfig.Namespace() + if err != nil { + return fmt.Errorf("error identifying namespace: %v", err) + } + } + + return nil +} diff --git a/cmd/seeder/pkg/plugin/recreatecluster.go b/cmd/seeder/pkg/plugin/recreatecluster.go new file mode 100644 index 0000000..3476644 --- /dev/null +++ b/cmd/seeder/pkg/plugin/recreatecluster.go @@ -0,0 +1,92 @@ +package plugin + +import ( + seederv1alpha1 "github.com/harvester/seeder/pkg/api/v1alpha1" + command "github.com/rancher/wrangler-cli" + "github.com/spf13/cobra" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +type RecreateCluster struct { + Version string `usage:"[optional] version to use to recreate cluster" short:"v"` +} + +var originalCluster *seederv1alpha1.Cluster + +func NewRecreateCluster() *cobra.Command { + rc := command.Command(&RecreateCluster{}, cobra.Command{ + Short: "recreate cluster", + Long: `recreate-cluster will extract the config for an existing cluster, delete said cluster and re-create this cluster +using the existing settings. 
If version is provided, the version will be patched before creating new cluster.`, + Use: "recreate-cluster $CLUSTER_NAME", + Args: cobra.ExactArgs(1), + }) + + return rc +} + +func (r *RecreateCluster) Run(cmd *cobra.Command, args []string) error { + cmd.Println(genHeaderMessage("running pre-flight checks for recreate-cluster")) + exists, err := r.clusterExists(cmd) + if err != nil { + return err + } + + if !exists { + return err + } + + cmd.Println(genHeaderMessage("deleting cluster")) + err = r.deleteCluster(cmd) + if err != nil { + return err + } + + cmd.Println(genHeaderMessage("recreating cluster")) + return r.recreateCluster(cmd) +} + +func (r *RecreateCluster) Pre(cmd *cobra.Command, args []string) error { + clusterName = args[0] + return nil +} + +// clusterExists returns true,nil if cluster exists +func (r *RecreateCluster) clusterExists(cmd *cobra.Command) (bool, error) { + originalCluster = &seederv1alpha1.Cluster{} + err := mgmtClient.Get(cmd.Context(), types.NamespacedName{Name: clusterName, Namespace: namespace}, originalCluster) + if err != nil { + if apierrors.IsNotFound(err) { + return false, nil + } else { + return false, err + } + } + + return true, nil +} + +func (r *RecreateCluster) deleteCluster(cmd *cobra.Command) error { + clusterObj := &seederv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: namespace, + }, + } + return mgmtClient.Delete(cmd.Context(), clusterObj) +} + +func (r *RecreateCluster) recreateCluster(cmd *cobra.Command) error { + if r.Version != "" { + originalCluster.Spec.HarvesterVersion = r.Version + } + // cleanup original cluster spec + originalCluster.Generation = 0 + originalCluster.ResourceVersion = "" + originalCluster.Finalizers = []string{} + originalCluster.CreationTimestamp = metav1.Time{} + originalCluster.DeletionTimestamp = nil + return mgmtClient.Create(cmd.Context(), originalCluster) +} diff --git a/cmd/seeder/pkg/plugin/recreatecluster_test.go 
b/cmd/seeder/pkg/plugin/recreatecluster_test.go new file mode 100644 index 0000000..95103ee --- /dev/null +++ b/cmd/seeder/pkg/plugin/recreatecluster_test.go @@ -0,0 +1,47 @@ +package plugin + +import ( + "context" + "testing" + + "k8s.io/apimachinery/pkg/types" + + seederv1alpha1 "github.com/harvester/seeder/pkg/api/v1alpha1" + + "github.com/harvester/seeder/pkg/mock" + + "github.com/spf13/cobra" + + "github.com/stretchr/testify/require" +) + +func Test_RecreateCluster(t *testing.T) { + assert := require.New(t) + ctx = context.TODO() + namespace = "default" + var err error + mgmtClient, err = mock.GenerateFakeClient() + assert.NoError(err, "expected no error setting up mock client") + + clusterName = "test-mock-cluster-running" + rc := &cobra.Command{} + r := &RecreateCluster{ + Version: "v1.1.1", + } + + exists, err := r.clusterExists(rc) + assert.NoError(err, "expected no error while looking up cluster") + assert.True(exists, "expected cluster to exist") + + err = r.deleteCluster(rc) + assert.NoError(err, "expected no error deleting cluster") + + err = r.recreateCluster(rc) + assert.NoError(err, "expected no error recreating cluster") + + clusterObj := &seederv1alpha1.Cluster{} + err = mgmtClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: clusterName}, clusterObj) + assert.NoError(err, "expected no error looking up cluster") + assert.Equal("v1.1.1", clusterObj.Spec.HarvesterVersion, "expected cluster object version to match") + +} diff --git a/go.mod b/go.mod index b9a358e..43e876b 100644 --- a/go.mod +++ b/go.mod @@ -5,11 +5,15 @@ go 1.18 require ( github.com/go-logr/logr v1.2.3 github.com/google/uuid v1.3.0 + github.com/mitchellh/go-homedir v1.1.0 github.com/onsi/ginkgo/v2 v2.1.4 github.com/onsi/gomega v1.19.0 github.com/pkg/errors v0.9.1 github.com/rancher/dynamiclistener v0.3.3 github.com/rancher/wrangler v1.0.0 + github.com/rancher/wrangler-cli v0.0.0-20220624114648-479c5692ba22 + github.com/sirupsen/logrus v1.8.1 + github.com/spf13/cobra 
v1.4.0 github.com/stmcginnis/gofish v0.12.1-0.20220311113027-6072260f4c8d github.com/stretchr/testify v1.8.0 github.com/tinkerbell/rufio v0.0.0-20220601004315-4dc2085adc8e @@ -40,6 +44,7 @@ require ( github.com/go-openapi/jsonreference v0.19.5 // indirect github.com/go-openapi/swag v0.19.15 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/lib/pq v1.10.2 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -49,12 +54,12 @@ require ( github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect github.com/opencontainers/runc v1.1.2 // indirect - github.com/sirupsen/logrus v1.8.1 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect - sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect + k8s.io/klog v1.0.0 // indirect + sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124 // indirect ) require ( @@ -121,4 +126,5 @@ require ( replace ( github.com/tinkerbell/rufio => github.com/tinkerbell/rufio v0.0.0-20220606134123-599b7401b5cc github.com/tinkerbell/tink v0.6.0 => github.com/tinkerbell/tink v0.6.1-0.20220524234633-0a800a4b5e25 + go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37 => go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 ) diff --git a/go.sum b/go.sum index 5d6deaf..fc67272 100644 --- a/go.sum +++ b/go.sum @@ -498,6 +498,7 @@ github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= 
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= @@ -563,6 +564,7 @@ github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182aff github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= @@ -693,6 +695,8 @@ github.com/rancher/lasso v0.0.0-20210616224652-fc3ebd901c08/go.mod h1:9qZd/S8DqW github.com/rancher/wrangler v0.8.9/go.mod h1:Lte9WjPtGYxYacIWeiS9qawvu2R4NujFU9xuXWJvc/0= github.com/rancher/wrangler v1.0.0 h1:K+GHMhkpgcGIfYgOX9RKdEEiM8o3WjFpI2U0ljxy+bg= github.com/rancher/wrangler v1.0.0/go.mod h1:TR0R07P5oU6T2bO+6eOX0jcFvKy+zoDd6u+PZ2mHJKg= +github.com/rancher/wrangler-cli v0.0.0-20220624114648-479c5692ba22 h1:ADMwgJyVwmLXJBSm/nNobB1XGSmFCTA+TY/otxgIPu4= +github.com/rancher/wrangler-cli v0.0.0-20220624114648-479c5692ba22/go.mod h1:vyO9SU60oplNFa5ZqoEAFWmYKgj2F6remdy8p6H0SgI= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/rogpeppe/fastuuid 
v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -728,6 +732,7 @@ github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tL github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -854,8 +859,8 @@ go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= go4.org/intern v0.0.0-20211027215823-ae77deb06f29 h1:UXLjNohABv4S58tHmeuIZDO6e3mHpW2Dx33gaNt03LE= go4.org/intern v0.0.0-20211027215823-ae77deb06f29/go.mod h1:cS2ma+47FKrLPdXFpr7CuxiTW3eyJbWew4qx0qtQWDA= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37 h1:Tx9kY6yUkLge/pFG7IEMwDZy6CS2ajFc9TvQdPCW0uA= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 h1:FyBZqvoA/jbNzuAWLQE2kG820zMAkcilx6BMjGbL/E4= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto 
v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1508,6 +1513,7 @@ k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAE k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= @@ -1554,8 +1560,9 @@ sigs.k8s.io/controller-runtime v0.4.0/go.mod h1:ApC79lpY3PHW9xj/w9pj+lYkLgwAAUZw sigs.k8s.io/controller-runtime v0.10.1/go.mod h1:CQp8eyUQZ/Q7PJvnIrB6/hgfTC1kBkGylwsLgOQi1WY= sigs.k8s.io/controller-runtime v0.12.2 h1:nqV02cvhbAj7tbt21bpPpTByrXGn2INHRsi39lXy9sE= sigs.k8s.io/controller-runtime v0.12.2/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= +sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124 h1:2sgAQQcY0dEW2SsQwTXhQV4vO6+rSslYx8K3XmM5hqQ= +sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= sigs.k8s.io/kustomize/api v0.8.11/go.mod h1:a77Ls36JdfCWojpUqR6m60pdGY1AYFix4AH83nJtY1g= sigs.k8s.io/kustomize/cmd/config v0.9.13/go.mod h1:7547FLF8W/lTaDf0BDqFTbZxM9zqwEJqCKN9sSR0xSs= diff --git a/pkg/mock/mock.go b/pkg/mock/mock.go index 444b895..b936816 100644 --- a/pkg/mock/mock.go +++ b/pkg/mock/mock.go @@ -1,6 +1,8 @@ package mock import ( + "strings" + 
seederv1alpha1 "github.com/harvester/seeder/pkg/api/v1alpha1" "github.com/rancher/wrangler/pkg/yaml" rufio "github.com/tinkerbell/rufio/api/v1alpha1" @@ -9,7 +11,6 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - "strings" ) const ( @@ -59,6 +60,128 @@ metadata: stringData: "username": "ADMIN" "password": "ADMIN" +--- +apiVersion: metal.harvesterhci.io/v1alpha1 +kind: Cluster +metadata: + name: test-mock-cluster-running + namespace: default +spec: + clusterConfig: + nameservers: + - 8.8.8.8 + imageURL: http://localhost/iso/ + nodes: + - addressPoolReference: + name: mock-pool + namespace: default + inventoryReference: + name: inventory-1 + namespace: default + version: v1.1.0 + vipConfig: + addressPoolReference: + name: mock-pool + namespace: default +status: + clusterAddress: 127.0.0.1 + status: clusterRunning + token: ZgSyOCtX4TowsNjP +--- +apiVersion: metal.harvesterhci.io/v1alpha1 +kind: Cluster +metadata: + name: test-mock-cluster-not-running + namespace: default +spec: + clusterConfig: + nameservers: + - 8.8.8.8 + imageURL: http://localhost/iso/ + nodes: + - addressPoolReference: + name: mock-pool + namespace: default + inventoryReference: + name: inventory-2 + namespace: default + version: v1.1.0 + vipConfig: + addressPoolReference: + name: mock-pool + namespace: default +status: + clusterAddress: 127.0.0.1 + status: clusterConfigReady + token: ZgSyOCtX4TowsNjP +--- +apiVersion: metal.harvesterhci.io/v1alpha1 +kind: Inventory +metadata: + name: inventory-1 + namespace: default +spec: + baseboardSpec: + connection: + authSecretRef: + name: hp-ilo + namespace: seeder + host: localhost + insecureTLS: true + port: 623 + events: + enabled: true + pollingInterval: 1h + managementInterfaceMacAddress: 5c:b9:01:89:c6:61 + primaryDisk: /dev/sda +status: + ownerCluster: + name: "" + namespace: "" + pxeBootConfig: {} + status: inventoryNodeReady +--- 
+apiVersion: metal.harvesterhci.io/v1alpha1 +kind: Inventory +metadata: + name: inventory-2 + namespace: default +spec: + baseboardSpec: + connection: + authSecretRef: + name: hp-ilo + namespace: seeder + host: localhost + insecureTLS: true + port: 623 + events: + enabled: true + pollingInterval: 1h + managementInterfaceMacAddress: 5c:b9:01:89:c6:61 + primaryDisk: /dev/sda +status: + ownerCluster: + name: "" + namespace: "" + pxeBootConfig: {} + status: "" +--- +apiVersion: metal.harvesterhci.io/v1alpha1 +kind: AddressPool +metadata: + name: mock-pool + namespace: default +spec: + cidr: 127.0.0.1/24 + gateway: 127.0.0.1 + netmask: 255.255.255.0 +status: + availableAddresses: 255 + lastAddress: 127.0.0.255 + netmask: 255.255.255.0 + startAddress: 127.0.0.1 + status: poolReady ` )