From 35721cfea81519695afa9693433ce0a9556d0e7f Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Wed, 7 Feb 2024 15:40:32 -0800 Subject: [PATCH] Adds helm & kustomize support for AKS service target --- cli/azd/.vscode/cspell.yaml | 4 + cli/azd/cmd/container.go | 8 +- cli/azd/pkg/helm/cli.go | 167 ++++++ cli/azd/pkg/helm/cli_test.go | 474 ++++++++++++++++++ cli/azd/pkg/helm/config.go | 19 + cli/azd/pkg/kustomize/cli.go | 84 ++++ cli/azd/pkg/kustomize/cli_test.go | 96 ++++ cli/azd/pkg/kustomize/config.go | 9 + .../{project => osutil}/expandable_string.go | 2 +- .../expandable_string_test.go | 2 +- cli/azd/pkg/project/container_helper_test.go | 7 +- cli/azd/pkg/project/framework_service.go | 10 +- .../pkg/project/framework_service_docker.go | 15 +- cli/azd/pkg/project/project_config.go | 3 +- cli/azd/pkg/project/service_config.go | 3 +- cli/azd/pkg/project/service_manager.go | 4 +- cli/azd/pkg/project/service_target.go | 1 + cli/azd/pkg/project/service_target_aks.go | 300 ++++++++++- .../pkg/project/service_target_aks_test.go | 268 +++++++++- cli/azd/pkg/tools/kubectl/kubectl.go | 14 + cli/azd/resources/alpha_features.yaml | 6 +- schemas/alpha/azure.yaml.json | 101 ++++ 22 files changed, 1538 insertions(+), 59 deletions(-) create mode 100644 cli/azd/pkg/helm/cli.go create mode 100644 cli/azd/pkg/helm/cli_test.go create mode 100644 cli/azd/pkg/helm/config.go create mode 100644 cli/azd/pkg/kustomize/cli.go create mode 100644 cli/azd/pkg/kustomize/cli_test.go create mode 100644 cli/azd/pkg/kustomize/config.go rename cli/azd/pkg/{project => osutil}/expandable_string.go (98%) rename cli/azd/pkg/{project => osutil}/expandable_string_test.go (96%) diff --git a/cli/azd/.vscode/cspell.yaml b/cli/azd/.vscode/cspell.yaml index 46871e3c6dd..a28c07a5270 100644 --- a/cli/azd/.vscode/cspell.yaml +++ b/cli/azd/.vscode/cspell.yaml @@ -88,6 +88,10 @@ overrides: - filename: pkg/azsdk/storage/storage_blob_client.go words: - azblob + - filename: pkg/project/service_target_aks.go + words: + - kustomization + - templating ignorePaths: - "**/*_test.go" - "**/mock*.go" diff --git a/cli/azd/cmd/container.go b/cli/azd/cmd/container.go index 4c57397f143..ee07a26fa90 100644 --- a/cli/azd/cmd/container.go +++ b/cli/azd/cmd/container.go @@ -28,12 +28,14 @@ import ( "github.com/azure/azure-dev/cli/azd/pkg/environment" "github.com/azure/azure-dev/cli/azd/pkg/environment/azdcontext" "github.com/azure/azure-dev/cli/azd/pkg/exec" + "github.com/azure/azure-dev/cli/azd/pkg/helm" "github.com/azure/azure-dev/cli/azd/pkg/httputil" "github.com/azure/azure-dev/cli/azd/pkg/infra" "github.com/azure/azure-dev/cli/azd/pkg/infra/provisioning" "github.com/azure/azure-dev/cli/azd/pkg/input" "github.com/azure/azure-dev/cli/azd/pkg/ioc" "github.com/azure/azure-dev/cli/azd/pkg/kubelogin" + "github.com/azure/azure-dev/cli/azd/pkg/kustomize" "github.com/azure/azure-dev/cli/azd/pkg/lazy" "github.com/azure/azure-dev/cli/azd/pkg/output" "github.com/azure/azure-dev/cli/azd/pkg/pipeline" @@ -466,6 +468,8 @@ func registerCommonDependencies(container *ioc.NestedContainer) { container.MustRegisterSingleton(kubectl.NewKubectl) container.MustRegisterSingleton(maven.NewMavenCli) container.MustRegisterSingleton(kubelogin.NewCli) + container.MustRegisterSingleton(helm.NewCli) + container.MustRegisterSingleton(kustomize.NewCli) container.MustRegisterSingleton(npm.NewNpmCli) container.MustRegisterSingleton(python.NewPythonCli) container.MustRegisterSingleton(swa.NewSwaCli) @@ -481,7 +485,7 @@ func registerCommonDependencies(container *ioc.NestedContainer) { // 
Service Targets serviceTargetMap := map[project.ServiceTargetKind]any{ - "": project.NewAppServiceTarget, + project.NonSpecifiedTarget: project.NewAppServiceTarget, project.AppServiceTarget: project.NewAppServiceTarget, project.AzureFunctionTarget: project.NewFunctionAppTarget, project.ContainerAppTarget: project.NewContainerAppTarget, @@ -497,7 +501,7 @@ func registerCommonDependencies(container *ioc.NestedContainer) { // Languages frameworkServiceMap := map[project.ServiceLanguageKind]any{ - "": project.NewDotNetProject, + project.ServiceLanguageNone: project.NewNoOpProject, project.ServiceLanguageDotNet: project.NewDotNetProject, project.ServiceLanguageCsharp: project.NewDotNetProject, project.ServiceLanguageFsharp: project.NewDotNetProject, diff --git a/cli/azd/pkg/helm/cli.go b/cli/azd/pkg/helm/cli.go new file mode 100644 index 00000000000..cd0f74a5bd4 --- /dev/null +++ b/cli/azd/pkg/helm/cli.go @@ -0,0 +1,167 @@ +package helm + +import ( + "context" + "encoding/json" + "fmt" + "log" + "time" + + "github.com/azure/azure-dev/cli/azd/pkg/exec" + "github.com/azure/azure-dev/cli/azd/pkg/tools" +) + +type Cli struct { + commandRunner exec.CommandRunner +} + +func NewCli(commandRunner exec.CommandRunner) *Cli { + return &Cli{ + commandRunner: commandRunner, + } +} + +// Gets the name of the Tool +func (cli *Cli) Name() string { + return "helm" +} + +// Returns the installation URL to install the Helm CLI +func (cli *Cli) InstallUrl() string { + return "https://aka.ms/azure-dev/helm-install" +} + +// Checks whether or not the Helm CLI is installed and available within the PATH +func (cli *Cli) CheckInstalled(ctx context.Context) error { + if err := tools.ToolInPath("helm"); err != nil { + return err + } + + // We don't have a minimum required version of helm today, but + // for diagnostics purposes, let's fetch and log the version of helm + // we're using. 
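+ // (helm's templated version output is a SemVer string with a leading "v",
+ // e.g. "v3.14.0"; getClientVersion below trims that prefix before logging.)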
+ if ver, err := cli.getClientVersion(ctx); err != nil { + log.Printf("error fetching helm version: %s", err) + } else { + log.Printf("helm version: %s", ver) + } + + return nil +} + +// AddRepo adds a helm repo with the specified name and url +func (c *Cli) AddRepo(ctx context.Context, repo *Repository) error { + runArgs := exec.NewRunArgs("helm", "repo", "add", repo.Name, repo.Url) + _, err := c.commandRunner.Run(ctx, runArgs) + if err != nil { + return fmt.Errorf("failed to add repo %s: %w", repo.Name, err) + } + + return nil +} + +// UpdateRepo updates the helm repo with the specified name +func (c *Cli) UpdateRepo(ctx context.Context, repoName string) error { + runArgs := exec.NewRunArgs("helm", "repo", "update", repoName) + _, err := c.commandRunner.Run(ctx, runArgs) + if err != nil { + return fmt.Errorf("failed to add repo %s: %w", repoName, err) + } + + return nil +} + +// Install installs a helm release +func (c *Cli) Install(ctx context.Context, release *Release) error { + runArgs := exec.NewRunArgs("helm", "install", release.Name, release.Chart) + if release.Values != "" { + runArgs = runArgs.AppendParams("--values", release.Values) + } + + _, err := c.commandRunner.Run(ctx, runArgs) + if err != nil { + return fmt.Errorf("failed to install helm chart %s: %w", release.Chart, err) + } + + return nil +} + +// Upgrade upgrades a helm release to the specified version +// If the release did not previously exist, it will be installed +func (c *Cli) Upgrade(ctx context.Context, release *Release) error { + runArgs := exec.NewRunArgs("helm", "upgrade", release.Name, release.Chart, "--install", "--wait") + if release.Version != "" { + runArgs = runArgs.AppendParams("--version", release.Version) + } + + if release.Values != "" { + runArgs = runArgs.AppendParams("--values", release.Values) + } + + if release.Namespace != "" { + runArgs = runArgs.AppendParams( + "--namespace", release.Namespace, + "--create-namespace", + ) + } + + _, err := c.commandRunner.Run(ctx, runArgs) + if err != nil { + return fmt.Errorf("failed to install helm chart %s: %w", release.Chart, err) + } + + return nil +} + +// Status returns the status of a helm release +func (c *Cli) Status(ctx context.Context, release *Release) (*StatusResult, error) { + runArgs := exec.NewRunArgs("helm", "status", release.Name, "--output", "json") + if release.Namespace != "" { + runArgs = runArgs.AppendParams("--namespace", release.Namespace) + } + + runResult, err := c.commandRunner.Run(ctx, runArgs) + if err != nil { + return nil, fmt.Errorf("failed to query status for helm chart %s: %w", release.Chart, err) + } + + var result *StatusResult + if err := json.Unmarshal([]byte(runResult.Stdout), &result); err != nil { + return nil, fmt.Errorf("failed to parse status for helm chart %s: %w", release.Chart, err) + } + + return result, nil +} + +func (cli *Cli) getClientVersion(ctx context.Context) (string, error) { + runArgs := exec.NewRunArgs("helm", "version", "--template", "{{.Version}}") + versionResult, err := cli.commandRunner.Run(ctx, runArgs) + if err != nil { + return "", fmt.Errorf("fetching helm version: %w", err) + } + + return versionResult.Stdout[1:], nil +} + +// StatusResult is the result of a helm status command +type StatusResult struct { + Name string `json:"name"` + Info StatusInfo `json:"info"` + Version float64 `json:"version"` + Namespace string `json:"namespace"` +} + +// StatusInfo is the status information of a helm release +type StatusInfo struct { + FirstDeployed time.Time `json:"first_deployed"` + LastDeployed 
time.Time `json:"last_deployed"` + Status StatusKind `json:"status"` + Notes string `json:"notes"` +} + +type StatusKind string + +const ( + // StatusKindDeployed is the status of a helm release that has been deployed + StatusKindDeployed StatusKind = "deployed" +) diff --git a/cli/azd/pkg/helm/cli_test.go b/cli/azd/pkg/helm/cli_test.go new file mode 100644 index 00000000000..3ac1a7687b5 --- /dev/null +++ b/cli/azd/pkg/helm/cli_test.go @@ -0,0 +1,474 @@ +package helm + +import ( + "context" + "errors" + "strings" + "testing" + + "github.com/azure/azure-dev/cli/azd/pkg/exec" + "github.com/azure/azure-dev/cli/azd/test/mocks" + "github.com/stretchr/testify/require" +) + +func Test_Cli_AddRepo(t *testing.T) { + repo := &Repository{ + Name: "test", + Url: "https://test.com", + } + + t.Run("Success", func(t *testing.T) { + ran := false + var runArgs exec.RunArgs + + mockContext := mocks.NewMockContext(context.Background()) + mockContext.CommandRunner. + When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "helm repo add") + }). + RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + ran = true + runArgs = args + return exec.NewRunResult(0, "", ""), nil + }) + + cli := NewCli(mockContext.CommandRunner) + err := cli.AddRepo(*mockContext.Context, repo) + require.True(t, ran) + require.NoError(t, err) + + require.Equal(t, "helm", runArgs.Cmd) + require.Equal(t, []string{ + "repo", + "add", + "test", + "https://test.com", + }, runArgs.Args) + }) + + t.Run("Failure", func(t *testing.T) { + ran := false + + mockContext := mocks.NewMockContext(context.Background()) + mockContext.CommandRunner. + When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "helm repo add") + }). + RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + ran = true + return exec.NewRunResult(1, "", ""), errors.New("failed to add repo") + }) + + cli := NewCli(mockContext.CommandRunner) + err := cli.AddRepo(*mockContext.Context, repo) + + require.True(t, ran) + require.Error(t, err) + require.ErrorContains(t, err, "failed to add repo") + }) +} + +func Test_Cli_UpdateRepo(t *testing.T) { + t.Run("Success", func(t *testing.T) { + ran := false + var runArgs exec.RunArgs + + mockContext := mocks.NewMockContext(context.Background()) + mockContext.CommandRunner. + When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "helm repo update") + }). + RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + ran = true + runArgs = args + return exec.NewRunResult(0, "", ""), nil + }) + + cli := NewCli(mockContext.CommandRunner) + err := cli.UpdateRepo(*mockContext.Context, "test") + require.True(t, ran) + require.NoError(t, err) + + require.Equal(t, "helm", runArgs.Cmd) + require.Equal(t, []string{ + "repo", + "update", + "test", + }, runArgs.Args) + }) + + t.Run("Failure", func(t *testing.T) { + ran := false + + mockContext := mocks.NewMockContext(context.Background()) + mockContext.CommandRunner. + When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "helm repo update") + }). 
+ RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + ran = true + return exec.NewRunResult(1, "", ""), errors.New("failed to update repo") + }) + + cli := NewCli(mockContext.CommandRunner) + err := cli.UpdateRepo(*mockContext.Context, "test") + + require.True(t, ran) + require.Error(t, err) + require.ErrorContains(t, err, "failed to update repo") + }) +} + +func Test_Cli_Install(t *testing.T) { + release := &Release{ + Name: "test", + Chart: "test/chart", + Version: "1.0.0", + } + + t.Run("Success", func(t *testing.T) { + ran := false + var runArgs exec.RunArgs + + mockContext := mocks.NewMockContext(context.Background()) + mockContext.CommandRunner. + When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "helm install") + }). + RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + ran = true + runArgs = args + return exec.NewRunResult(0, "", ""), nil + }) + + cli := NewCli(mockContext.CommandRunner) + err := cli.Install(*mockContext.Context, release) + require.True(t, ran) + require.NoError(t, err) + + require.Equal(t, "helm", runArgs.Cmd) + require.Equal(t, []string{ + "install", + "test", + "test/chart", + }, runArgs.Args) + }) + + t.Run("WithValues", func(t *testing.T) { + ran := false + var runArgs exec.RunArgs + + releaseWithValues := *release + releaseWithValues.Values = "values.yaml" + + mockContext := mocks.NewMockContext(context.Background()) + mockContext.CommandRunner. + When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "helm install") + }). + RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + ran = true + runArgs = args + return exec.NewRunResult(0, "", ""), nil + }) + + cli := NewCli(mockContext.CommandRunner) + err := cli.Install(*mockContext.Context, &releaseWithValues) + require.True(t, ran) + require.NoError(t, err) + + require.Equal(t, "helm", runArgs.Cmd) + require.Equal(t, []string{ + "install", + "test", + "test/chart", + "--values", + "values.yaml", + }, runArgs.Args) + }) + + t.Run("Failure", func(t *testing.T) { + ran := false + + mockContext := mocks.NewMockContext(context.Background()) + mockContext.CommandRunner. + When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "helm install") + }). + RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + ran = true + return exec.NewRunResult(1, "", ""), errors.New("failed to install release") + }) + + cli := NewCli(mockContext.CommandRunner) + err := cli.Install(*mockContext.Context, release) + + require.True(t, ran) + require.Error(t, err) + require.ErrorContains(t, err, "failed to install release") + }) +} + +func Test_Cli_Upgrade(t *testing.T) { + release := &Release{ + Name: "test", + Chart: "test/chart", + } + + t.Run("Success", func(t *testing.T) { + ran := false + var runArgs exec.RunArgs + + mockContext := mocks.NewMockContext(context.Background()) + mockContext.CommandRunner. + When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "helm upgrade") + }). 
+ RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + ran = true + runArgs = args + return exec.NewRunResult(0, "", ""), nil + }) + + cli := NewCli(mockContext.CommandRunner) + err := cli.Upgrade(*mockContext.Context, release) + require.True(t, ran) + require.NoError(t, err) + + require.Equal(t, "helm", runArgs.Cmd) + require.Equal(t, []string{ + "upgrade", + "test", + "test/chart", + "--install", + "--wait", + }, runArgs.Args) + }) + + t.Run("WithValues", func(t *testing.T) { + ran := false + var runArgs exec.RunArgs + + releaseWithValues := *release + releaseWithValues.Values = "values.yaml" + + mockContext := mocks.NewMockContext(context.Background()) + mockContext.CommandRunner. + When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "helm upgrade") + }). + RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + ran = true + runArgs = args + return exec.NewRunResult(0, "", ""), nil + }) + + cli := NewCli(mockContext.CommandRunner) + err := cli.Upgrade(*mockContext.Context, &releaseWithValues) + require.True(t, ran) + require.NoError(t, err) + + require.Equal(t, "helm", runArgs.Cmd) + require.Equal(t, []string{ + "upgrade", + "test", + "test/chart", + "--install", + "--wait", + "--values", + "values.yaml", + }, runArgs.Args) + }) + + t.Run("WithVersion", func(t *testing.T) { + ran := false + var runArgs exec.RunArgs + + releaseWithVersion := *release + releaseWithVersion.Version = "1.0.0" + + mockContext := mocks.NewMockContext(context.Background()) + mockContext.CommandRunner. + When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "helm upgrade") + }). + RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + ran = true + runArgs = args + return exec.NewRunResult(0, "", ""), nil + }) + + cli := NewCli(mockContext.CommandRunner) + err := cli.Upgrade(*mockContext.Context, &releaseWithVersion) + require.True(t, ran) + require.NoError(t, err) + + require.Equal(t, "helm", runArgs.Cmd) + require.Equal(t, []string{ + "upgrade", + "test", + "test/chart", + "--install", + "--wait", + "--version", + "1.0.0", + }, runArgs.Args) + }) + + t.Run("WithNamespace", func(t *testing.T) { + ran := false + var runArgs exec.RunArgs + + releaseWithNamespace := *release + releaseWithNamespace.Namespace = "test-namespace" + + mockContext := mocks.NewMockContext(context.Background()) + mockContext.CommandRunner. + When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "helm upgrade") + }). + RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + ran = true + runArgs = args + return exec.NewRunResult(0, "", ""), nil + }) + + cli := NewCli(mockContext.CommandRunner) + err := cli.Upgrade(*mockContext.Context, &releaseWithNamespace) + require.True(t, ran) + require.NoError(t, err) + + require.Equal(t, "helm", runArgs.Cmd) + require.Equal(t, []string{ + "upgrade", + "test", + "test/chart", + "--install", + "--wait", + "--namespace", + "test-namespace", + "--create-namespace", + }, runArgs.Args) + }) + + t.Run("Failure", func(t *testing.T) { + ran := false + + mockContext := mocks.NewMockContext(context.Background()) + mockContext.CommandRunner. + When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "helm upgrade") + }). 
+ RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + ran = true + return exec.NewRunResult(1, "", ""), errors.New("failed to upgrade release") + }) + + cli := NewCli(mockContext.CommandRunner) + err := cli.Upgrade(*mockContext.Context, release) + + require.True(t, ran) + require.Error(t, err) + require.ErrorContains(t, err, "failed to upgrade release") + }) +} + +func Test_Cli_Status(t *testing.T) { + release := &Release{ + Name: "test", + } + + t.Run("Success", func(t *testing.T) { + ran := false + var runArgs exec.RunArgs + + mockContext := mocks.NewMockContext(context.Background()) + mockContext.CommandRunner. + When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "helm status") + }). + RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + ran = true + runArgs = args + return exec.NewRunResult(0, `{ + "info": { + "status": "deployed" + } + }`, ""), nil + }) + + cli := NewCli(mockContext.CommandRunner) + status, err := cli.Status(*mockContext.Context, release) + require.True(t, ran) + require.NoError(t, err) + require.Equal(t, StatusKindDeployed, status.Info.Status) + + require.Equal(t, "helm", runArgs.Cmd) + require.Equal(t, []string{ + "status", + "test", + "--output", + "json", + }, runArgs.Args) + }) + + t.Run("WithNamespace", func(t *testing.T) { + ran := false + var runArgs exec.RunArgs + + releaseWithNamespace := *release + releaseWithNamespace.Namespace = "test-namespace" + + mockContext := mocks.NewMockContext(context.Background()) + mockContext.CommandRunner. + When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "helm status") + }). + RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + ran = true + runArgs = args + return exec.NewRunResult(0, `{ + "info": { + "status": "deployed" + } + }`, ""), nil + }) + + cli := NewCli(mockContext.CommandRunner) + status, err := cli.Status(*mockContext.Context, &releaseWithNamespace) + require.True(t, ran) + require.NoError(t, err) + require.Equal(t, StatusKindDeployed, status.Info.Status) + + require.Equal(t, "helm", runArgs.Cmd) + require.Equal(t, []string{ + "status", + "test", + "--output", + "json", + "--namespace", + "test-namespace", + }, runArgs.Args) + }) + + t.Run("Failure", func(t *testing.T) { + ran := false + + mockContext := mocks.NewMockContext(context.Background()) + mockContext.CommandRunner. + When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "helm status") + }). 
+ RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + ran = true + return exec.NewRunResult(1, "", ""), errors.New("failed to get status") + }) + + cli := NewCli(mockContext.CommandRunner) + _, err := cli.Status(*mockContext.Context, release) + + require.True(t, ran) + require.Error(t, err) + require.ErrorContains(t, err, "failed to get status") + }) +} diff --git a/cli/azd/pkg/helm/config.go b/cli/azd/pkg/helm/config.go new file mode 100644 index 00000000000..b80619d6b13 --- /dev/null +++ b/cli/azd/pkg/helm/config.go @@ -0,0 +1,19 @@ +package helm + +type Config struct { + Repositories []*Repository `yaml:"repositories"` + Releases []*Release `yaml:"releases"` +} + +type Repository struct { + Name string `yaml:"name"` + Url string `yaml:"url"` +} + +type Release struct { + Name string `yaml:"name"` + Chart string `yaml:"chart"` + Version string `yaml:"version"` + Namespace string `yaml:"namespace"` + Values string `yaml:"values"` +} diff --git a/cli/azd/pkg/kustomize/cli.go b/cli/azd/pkg/kustomize/cli.go new file mode 100644 index 00000000000..dd17b7660c8 --- /dev/null +++ b/cli/azd/pkg/kustomize/cli.go @@ -0,0 +1,84 @@ +package kustomize + +import ( + "context" + "fmt" + "log" + + "github.com/azure/azure-dev/cli/azd/pkg/exec" + "github.com/azure/azure-dev/cli/azd/pkg/tools" +) + +// Cli is a wrapper around the kustomize cli +type Cli struct { + commandRunner exec.CommandRunner + cwd string +} + +// NewCli creates a new instance of the kustomize cli +func NewCli(commandRunner exec.CommandRunner) *Cli { + return &Cli{ + commandRunner: commandRunner, + } +} + +func (cli *Cli) Name() string { + return "kustomize" +} + +// Returns the installation URL to install the Kustomize CLI +func (cli *Cli) InstallUrl() string { + return "https://aka.ms/azure-dev/kustomize-install" +} + +// Checks whether or not the Kustomize CLI is installed and available within the PATH +func (cli *Cli) CheckInstalled(ctx context.Context) error { + if err := tools.ToolInPath("kustomize"); err != nil { + return err + } + + // We don't have a minimum required version of kustomize today, but + // for diagnostics purposes, let's fetch and log the version of kustomize + // we're using. + if ver, err := cli.getClientVersion(ctx); err != nil { + log.Printf("error fetching kustomize version: %s", err) + } else { + log.Printf("kustomize version: %s", ver) + } + + return nil +} + +// WithCwd sets the working directory for the kustomize command +func (cli *Cli) WithCwd(cwd string) *Cli { + cli.cwd = cwd + return cli +} + +// Edit runs the kustomize edit command with the specified args +func (cli *Cli) Edit(ctx context.Context, args ...string) error { + runArgs := exec.NewRunArgs("kustomize", "edit"). + AppendParams(args...) 
+ + if cli.cwd != "" { + runArgs = runArgs.WithCwd(cli.cwd) + } + + _, err := cli.commandRunner.Run(ctx, runArgs) + + if err != nil { + return fmt.Errorf("failed running kustomize edit: %w", err) + } + + return nil +} + +func (cli *Cli) getClientVersion(ctx context.Context) (string, error) { + runArgs := exec.NewRunArgs("kustomize", "version") + versionResult, err := cli.commandRunner.Run(ctx, runArgs) + if err != nil { + return "", fmt.Errorf("fetching kustomize version: %w", err) + } + + return versionResult.Stdout[1:], nil +} diff --git a/cli/azd/pkg/kustomize/cli_test.go b/cli/azd/pkg/kustomize/cli_test.go new file mode 100644 index 00000000000..2abf3b17ae6 --- /dev/null +++ b/cli/azd/pkg/kustomize/cli_test.go @@ -0,0 +1,96 @@ +package kustomize + +import ( + "context" + "errors" + "strings" + "testing" + + "github.com/azure/azure-dev/cli/azd/pkg/exec" + "github.com/azure/azure-dev/cli/azd/test/mocks" + "github.com/stretchr/testify/require" +) + +func Test_Edit(t *testing.T) { + args := []string{"set", "image", "nginx=nginx:1.7.9"} + + t.Run("Success", func(t *testing.T) { + ran := false + var runArgs exec.RunArgs + + mockContext := mocks.NewMockContext(context.Background()) + mockContext.CommandRunner. + When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "kustomize edit") + }). + RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + ran = true + runArgs = args + return exec.NewRunResult(0, "", ""), nil + }) + + cli := NewCli(mockContext.CommandRunner) + err := cli.Edit(*mockContext.Context, args...) + require.True(t, ran) + require.NoError(t, err) + + expected := []string{"edit"} + expected = append(expected, args...) + + require.Equal(t, "kustomize", runArgs.Cmd) + require.Equal(t, "", runArgs.Cwd) + require.Equal(t, expected, runArgs.Args) + }) + + t.Run("WithCwd", func(t *testing.T) { + ran := false + var runArgs exec.RunArgs + + mockContext := mocks.NewMockContext(context.Background()) + mockContext.CommandRunner. + When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "kustomize edit") + }). + RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + ran = true + runArgs = args + return exec.NewRunResult(0, "", ""), nil + }) + + cli := NewCli(mockContext.CommandRunner) + err := cli. + WithCwd("/tmp"). + Edit(*mockContext.Context, args...) + + require.True(t, ran) + require.NoError(t, err) + + expected := []string{"edit"} + expected = append(expected, args...) + + require.Equal(t, "kustomize", runArgs.Cmd) + require.Equal(t, "/tmp", runArgs.Cwd) + require.Equal(t, expected, runArgs.Args) + }) + + t.Run("Failure", func(t *testing.T) { + ran := false + + mockContext := mocks.NewMockContext(context.Background()) + mockContext.CommandRunner. + When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "kustomize edit") + }). + RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + ran = true + return exec.NewRunResult(1, "", ""), errors.New("failed to edit kustomize config") + }) + + cli := NewCli(mockContext.CommandRunner) + err := cli.Edit(*mockContext.Context, args...) 
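+ // For example, Edit(ctx, "set", "image", "nginx=nginx:1.7.9") runs:
+ //   kustomize edit set image nginx=nginx:1.7.9
+ // in the configured working directory.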
+ + require.True(t, ran) + require.Error(t, err) + require.ErrorContains(t, err, "failed to edit kustomize config") + }) +} diff --git a/cli/azd/pkg/kustomize/config.go b/cli/azd/pkg/kustomize/config.go new file mode 100644 index 00000000000..7b56f9795e4 --- /dev/null +++ b/cli/azd/pkg/kustomize/config.go @@ -0,0 +1,9 @@ +package kustomize + +import "github.com/azure/azure-dev/cli/azd/pkg/osutil" + +type Config struct { + Directory osutil.ExpandableString `yaml:"dir"` + Edits []osutil.ExpandableString `yaml:"edits"` + Env map[string]osutil.ExpandableString `yaml:"env"` +} diff --git a/cli/azd/pkg/project/expandable_string.go b/cli/azd/pkg/osutil/expandable_string.go similarity index 98% rename from cli/azd/pkg/project/expandable_string.go rename to cli/azd/pkg/osutil/expandable_string.go index fe0bc9985ac..ee5495f9294 100644 --- a/cli/azd/pkg/project/expandable_string.go +++ b/cli/azd/pkg/osutil/expandable_string.go @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -package project +package osutil import ( "fmt" diff --git a/cli/azd/pkg/project/expandable_string_test.go b/cli/azd/pkg/osutil/expandable_string_test.go similarity index 96% rename from cli/azd/pkg/project/expandable_string_test.go rename to cli/azd/pkg/osutil/expandable_string_test.go index f009c6f6fe4..8a8db9e30b4 100644 --- a/cli/azd/pkg/project/expandable_string_test.go +++ b/cli/azd/pkg/osutil/expandable_string_test.go @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -package project +package osutil import ( "testing" diff --git a/cli/azd/pkg/project/container_helper_test.go b/cli/azd/pkg/project/container_helper_test.go index 7af3a072eb8..ac5c87fa36a 100644 --- a/cli/azd/pkg/project/container_helper_test.go +++ b/cli/azd/pkg/project/container_helper_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/azure/azure-dev/cli/azd/pkg/environment" + "github.com/azure/azure-dev/cli/azd/pkg/osutil" "github.com/azure/azure-dev/cli/azd/test/mocks" "github.com/azure/azure-dev/cli/azd/test/mocks/mockenv" "github.com/benbjohnson/clock" @@ -42,7 +43,7 @@ func Test_ContainerHelper_LocalImageTag(t *testing.T) { { "ImageTagSpecified", DockerProjectOptions{ - Tag: NewExpandableString("contoso/contoso-image:latest"), + Tag: osutil.NewExpandableString("contoso/contoso-image:latest"), }, "contoso/contoso-image:latest"}, } @@ -109,7 +110,7 @@ func Test_Resolve_RegistryName(t *testing.T) { envManager := &mockenv.MockEnvManager{} containerHelper := NewContainerHelper(env, envManager, clock.NewMock(), nil, nil) serviceConfig := createTestServiceConfig("./src/api", ContainerAppTarget, ServiceLanguageTypeScript) - serviceConfig.Docker.Registry = NewExpandableString("contoso.azurecr.io") + serviceConfig.Docker.Registry = osutil.NewExpandableString("contoso.azurecr.io") registryName, err := containerHelper.RegistryName(*mockContext.Context, serviceConfig) require.NoError(t, err) @@ -123,7 +124,7 @@ func Test_Resolve_RegistryName(t *testing.T) { envManager := &mockenv.MockEnvManager{} containerHelper := NewContainerHelper(env, envManager, clock.NewMock(), nil, nil) serviceConfig := createTestServiceConfig("./src/api", ContainerAppTarget, ServiceLanguageTypeScript) - serviceConfig.Docker.Registry = NewExpandableString("${MY_CUSTOM_REGISTRY}") + serviceConfig.Docker.Registry = osutil.NewExpandableString("${MY_CUSTOM_REGISTRY}") registryName, err := containerHelper.RegistryName(*mockContext.Context, serviceConfig) 
require.NoError(t, err) diff --git a/cli/azd/pkg/project/framework_service.go b/cli/azd/pkg/project/framework_service.go index c4c31fd926d..f90d205a85a 100644 --- a/cli/azd/pkg/project/framework_service.go +++ b/cli/azd/pkg/project/framework_service.go @@ -15,6 +15,7 @@ import ( type ServiceLanguageKind string const ( + ServiceLanguageNone ServiceLanguageKind = "" ServiceLanguageDotNet ServiceLanguageKind = "dotnet" ServiceLanguageCsharp ServiceLanguageKind = "csharp" ServiceLanguageFsharp ServiceLanguageKind = "fsharp" @@ -26,17 +27,14 @@ const ( ) func parseServiceLanguage(kind ServiceLanguageKind) (ServiceLanguageKind, error) { - if string(kind) == "" { - return ServiceLanguageKind(""), fmt.Errorf("language property must not be empty") - } - // aliases if string(kind) == "py" { return ServiceLanguagePython, nil } switch kind { - case ServiceLanguageDotNet, + case ServiceLanguageNone, + ServiceLanguageDotNet, ServiceLanguageCsharp, ServiceLanguageFsharp, ServiceLanguageJavaScript, @@ -47,7 +45,7 @@ func parseServiceLanguage(kind ServiceLanguageKind) (ServiceLanguageKind, error) return kind, nil } - return ServiceLanguageKind(""), fmt.Errorf("unsupported language '%s'", kind) + return ServiceLanguageKind("Unsupported"), fmt.Errorf("unsupported language '%s'", kind) } type FrameworkRequirements struct { diff --git a/cli/azd/pkg/project/framework_service_docker.go b/cli/azd/pkg/project/framework_service_docker.go index d22bd66a373..9d4f14796b6 100644 --- a/cli/azd/pkg/project/framework_service_docker.go +++ b/cli/azd/pkg/project/framework_service_docker.go @@ -23,6 +23,7 @@ import ( "github.com/azure/azure-dev/cli/azd/pkg/environment" "github.com/azure/azure-dev/cli/azd/pkg/exec" "github.com/azure/azure-dev/cli/azd/pkg/input" + "github.com/azure/azure-dev/cli/azd/pkg/osutil" "github.com/azure/azure-dev/cli/azd/pkg/output" "github.com/azure/azure-dev/cli/azd/pkg/tools" "github.com/azure/azure-dev/cli/azd/pkg/tools/azcli" @@ -32,13 +33,13 @@ import ( ) type DockerProjectOptions struct { - Path string `yaml:"path,omitempty" json:"path,omitempty"` - Context string `yaml:"context,omitempty" json:"context,omitempty"` - Platform string `yaml:"platform,omitempty" json:"platform,omitempty"` - Target string `yaml:"target,omitempty" json:"target,omitempty"` - Tag ExpandableString `yaml:"tag,omitempty" json:"tag,omitempty"` - Registry ExpandableString `yaml:"registry,omitempty" json:"registry,omitempty"` - BuildArgs []string `yaml:"buildArgs,omitempty" json:"buildArgs,omitempty"` + Path string `yaml:"path,omitempty" json:"path,omitempty"` + Context string `yaml:"context,omitempty" json:"context,omitempty"` + Platform string `yaml:"platform,omitempty" json:"platform,omitempty"` + Target string `yaml:"target,omitempty" json:"target,omitempty"` + Tag osutil.ExpandableString `yaml:"tag,omitempty" json:"tag,omitempty"` + Registry osutil.ExpandableString `yaml:"registry,omitempty" json:"registry,omitempty"` + BuildArgs []string `yaml:"buildArgs,omitempty" json:"buildArgs,omitempty"` } type dockerBuildResult struct { diff --git a/cli/azd/pkg/project/project_config.go b/cli/azd/pkg/project/project_config.go index 4debd4c90e1..b9902b8b04e 100644 --- a/cli/azd/pkg/project/project_config.go +++ b/cli/azd/pkg/project/project_config.go @@ -5,6 +5,7 @@ import ( "github.com/azure/azure-dev/cli/azd/pkg/ext" "github.com/azure/azure-dev/cli/azd/pkg/infra/provisioning" + "github.com/azure/azure-dev/cli/azd/pkg/osutil" "github.com/azure/azure-dev/cli/azd/pkg/platform" "github.com/azure/azure-dev/cli/azd/pkg/state" 
"github.com/azure/azure-dev/cli/azd/pkg/workflow" @@ -16,7 +17,7 @@ import ( type ProjectConfig struct { RequiredVersions *RequiredVersions `yaml:"requiredVersions,omitempty"` Name string `yaml:"name"` - ResourceGroupName ExpandableString `yaml:"resourceGroup,omitempty"` + ResourceGroupName osutil.ExpandableString `yaml:"resourceGroup,omitempty"` Path string `yaml:"-"` Metadata *ProjectMetadata `yaml:"metadata,omitempty"` Services map[string]*ServiceConfig `yaml:"services,omitempty"` diff --git a/cli/azd/pkg/project/service_config.go b/cli/azd/pkg/project/service_config.go index 7ec19fdf49d..fc15269fb9d 100644 --- a/cli/azd/pkg/project/service_config.go +++ b/cli/azd/pkg/project/service_config.go @@ -6,6 +6,7 @@ import ( "github.com/azure/azure-dev/cli/azd/pkg/apphost" "github.com/azure/azure-dev/cli/azd/pkg/ext" "github.com/azure/azure-dev/cli/azd/pkg/infra/provisioning" + "github.com/azure/azure-dev/cli/azd/pkg/osutil" ) type ServiceConfig struct { @@ -14,7 +15,7 @@ type ServiceConfig struct { // The friendly name/key of the project from the azure.yaml file Name string `yaml:"-"` // The name used to override the default azure resource name - ResourceName ExpandableString `yaml:"resourceName,omitempty"` + ResourceName osutil.ExpandableString `yaml:"resourceName,omitempty"` // The relative path to the project folder from the project root RelativePath string `yaml:"project"` // The azure hosting model to use, ex) appservice, function, containerapp diff --git a/cli/azd/pkg/project/service_manager.go b/cli/azd/pkg/project/service_manager.go index 2049e853b89..f32764ad9ed 100644 --- a/cli/azd/pkg/project/service_manager.go +++ b/cli/azd/pkg/project/service_manager.go @@ -562,7 +562,8 @@ func (sm *serviceManager) GetFrameworkService(ctx context.Context, serviceConfig // For hosts which run in containers, if the source project is not already a container, we need to wrap it in a docker // project that handles the containerization. 
- if serviceConfig.Host.RequiresContainer() && serviceConfig.Language != ServiceLanguageDocker { + requiresLanguage := serviceConfig.Language != ServiceLanguageDocker && serviceConfig.Language != ServiceLanguageNone + if serviceConfig.Host.RequiresContainer() && requiresLanguage { var compositeFramework CompositeFrameworkService if err := sm.serviceLocator.ResolveNamed(string(ServiceLanguageDocker), &compositeFramework); err != nil { panic(fmt.Errorf( @@ -574,7 +575,6 @@ func (sm *serviceManager) GetFrameworkService(ctx context.Context, serviceConfig } compositeFramework.SetSource(frameworkService) - frameworkService = compositeFramework } diff --git a/cli/azd/pkg/project/service_target.go b/cli/azd/pkg/project/service_target.go index 63d67dfe596..2c99a945e68 100644 --- a/cli/azd/pkg/project/service_target.go +++ b/cli/azd/pkg/project/service_target.go @@ -18,6 +18,7 @@ import ( type ServiceTargetKind string const ( + NonSpecifiedTarget ServiceTargetKind = "" AppServiceTarget ServiceTargetKind = "appservice" ContainerAppTarget ServiceTargetKind = "containerapp" AzureFunctionTarget ServiceTargetKind = "function" diff --git a/cli/azd/pkg/project/service_target_aks.go b/cli/azd/pkg/project/service_target_aks.go index f89f635074d..6f52cdea22b 100644 --- a/cli/azd/pkg/project/service_target_aks.go +++ b/cli/azd/pkg/project/service_target_aks.go @@ -6,26 +6,38 @@ import ( "fmt" "log" "net/url" + "os" "path/filepath" "strings" + "time" + "github.com/azure/azure-dev/cli/azd/pkg/alpha" "github.com/azure/azure-dev/cli/azd/pkg/async" "github.com/azure/azure-dev/cli/azd/pkg/azure" "github.com/azure/azure-dev/cli/azd/pkg/convert" "github.com/azure/azure-dev/cli/azd/pkg/environment" "github.com/azure/azure-dev/cli/azd/pkg/ext" + "github.com/azure/azure-dev/cli/azd/pkg/helm" "github.com/azure/azure-dev/cli/azd/pkg/input" "github.com/azure/azure-dev/cli/azd/pkg/kubelogin" + "github.com/azure/azure-dev/cli/azd/pkg/kustomize" + "github.com/azure/azure-dev/cli/azd/pkg/osutil" "github.com/azure/azure-dev/cli/azd/pkg/output" "github.com/azure/azure-dev/cli/azd/pkg/tools" "github.com/azure/azure-dev/cli/azd/pkg/tools/azcli" "github.com/azure/azure-dev/cli/azd/pkg/tools/kubectl" + "github.com/sethvargo/go-retry" ) const ( defaultDeploymentPath = "manifests" ) +var ( + featureHelm alpha.FeatureId = alpha.MustFeatureKey("aks.helm") + featureKustomize alpha.FeatureId = alpha.MustFeatureKey("aks.kustomize") +) + // The AKS configuration options type AksOptions struct { // The namespace used for deploying k8s resources. 
Defaults to the project name @@ -38,6 +50,10 @@ type AksOptions struct { Deployment AksDeploymentOptions `yaml:"deployment"` // The services service configuration options Service AksServiceOptions `yaml:"service"` + // The helm configuration options + Helm *helm.Config `yaml:"helm"` + // The kustomize configuration options + Kustomize *kustomize.Config `yaml:"kustomize"` } // The AKS ingress options @@ -64,7 +80,10 @@ type aksTarget struct { resourceManager ResourceManager kubectl kubectl.KubectlCli kubeLoginCli *kubelogin.Cli + helmCli *helm.Cli + kustomizeCli *kustomize.Cli containerHelper *ContainerHelper + featureManager *alpha.FeatureManager } // Creates a new instance of the AKS service target @@ -76,7 +95,10 @@ func NewAksTarget( resourceManager ResourceManager, kubectlCli kubectl.KubectlCli, kubeLoginCli *kubelogin.Cli, + helmCli *helm.Cli, + kustomizeCli *kustomize.Cli, containerHelper *ContainerHelper, + featureManager *alpha.FeatureManager, ) ServiceTarget { return &aksTarget{ env: env, @@ -86,7 +108,10 @@ func NewAksTarget( resourceManager: resourceManager, kubectl: kubectlCli, kubeLoginCli: kubeLoginCli, + helmCli: helmCli, + kustomizeCli: kustomizeCli, containerHelper: containerHelper, + featureManager: featureManager, } } @@ -96,6 +121,14 @@ func (t *aksTarget) RequiredExternalTools(ctx context.Context) []tools.ExternalT allTools = append(allTools, t.containerHelper.RequiredExternalTools(ctx)...) allTools = append(allTools, t.kubectl) + if t.featureManager.IsEnabled(featureHelm) { + allTools = append(allTools, t.helmCli) + } + + if t.featureManager.IsEnabled(featureKustomize) { + allTools = append(allTools, t.kustomizeCli) + } + return allTools } @@ -159,43 +192,64 @@ func (t *aksTarget) Deploy( return } - // Login, tag & push container image to ACR - containerDeployTask := t.containerHelper.Deploy(ctx, serviceConfig, packageOutput, targetResource, true) - syncProgress(task, containerDeployTask.Progress()) + if packageOutput.PackagePath != "" { + // Login, tag & push container image to ACR + containerDeployTask := t.containerHelper.Deploy(ctx, serviceConfig, packageOutput, targetResource, true) + syncProgress(task, containerDeployTask.Progress()) + + _, err := containerDeployTask.Await() + if err != nil { + task.SetError(err) + return + } + } // Sync environment t.kubectl.SetEnv(t.env.Dotenv()) - task.SetProgress(NewServiceProgress("Applying k8s manifests")) - deploymentPath := serviceConfig.K8s.DeploymentPath - if deploymentPath == "" { - deploymentPath = defaultDeploymentPath - } + // Deploy k8s resources in the following order: + // 1. Helm + // 2. Kustomize + // 3. Manifests + // + // Users may install a helm chart to setup their cluster with custom resource definitions that their + // custom manifests depend on. + // Users are more likely to either deploy with kustomize or vanilla manifests but they could do both. 
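+ // For illustration, a service opting into helm and kustomize alongside plain
+ // manifests would carry a k8s configuration roughly like the following
+ // (hypothetical values, mirroring the configs exercised in the tests):
+ //
+ //	serviceConfig.K8s = AksOptions{
+ //		Helm: &helm.Config{
+ //			Repositories: []*helm.Repository{
+ //				{Name: "argo", Url: "https://argoproj.github.io/argo-helm"},
+ //			},
+ //			Releases: []*helm.Release{
+ //				{Name: "argocd", Chart: "argo/argo-cd", Version: "5.51.4"},
+ //			},
+ //		},
+ //		Kustomize: &kustomize.Config{
+ //			Directory: osutil.NewExpandableString("./kustomize/overlays/dev"),
+ //			Edits: []osutil.ExpandableString{
+ //				osutil.NewExpandableString("set image todo-api=${SERVICE_API_IMAGE_NAME}"),
+ //			},
+ //		},
+ //	}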
- err := t.kubectl.Apply( - ctx, - filepath.Join(serviceConfig.RelativePath, deploymentPath), - nil, - ) + deployed := false + + // Helm Support + helmDeployed, err := t.deployHelmCharts(ctx, serviceConfig, task) if err != nil { - task.SetError(fmt.Errorf("failed applying kube manifests: %w", err)) + task.SetError(fmt.Errorf("helm deployment failed: %w", err)) return } - deploymentName := serviceConfig.K8s.Deployment.Name - if deploymentName == "" { - deploymentName = serviceConfig.Name + deployed = deployed || helmDeployed + + // Kustomize Support + kustomizeDeployed, err := t.deployKustomize(ctx, serviceConfig, task) + if err != nil { + task.SetError(fmt.Errorf("kustomize deployment failed: %w", err)) + return } - // It is not a requirement for a AZD deploy to contain a deployment object - // If we don't find any deployment within the namespace we will continue - task.SetProgress(NewServiceProgress("Verifying deployment")) - deployment, err := t.waitForDeployment(ctx, deploymentName) - if err != nil && !errors.Is(err, kubectl.ErrResourceNotFound) { + deployed = deployed || kustomizeDeployed + + // Vanilla k8s manifests with minimal templating support + deployment, err := t.deployManifests(ctx, serviceConfig, task) + if err != nil && !os.IsNotExist(err) { task.SetError(err) return } + deployed = deployed || deployment != nil + + if !deployed { + task.SetError(errors.New("no deployment manifests found")) + return + } + task.SetProgress(NewServiceProgress("Fetching endpoints for AKS service")) endpoints, err := t.Endpoints(ctx, serviceConfig, targetResource) if err != nil { @@ -229,6 +283,194 @@ func (t *aksTarget) Deploy( }) } +// deployManifests deploys raw or templated yaml manifests to the k8s cluster +func (t *aksTarget) deployManifests( + ctx context.Context, + serviceConfig *ServiceConfig, + task *async.TaskContextWithProgress[*ServiceDeployResult, ServiceProgress], +) (*kubectl.Deployment, error) { + deploymentPath := serviceConfig.K8s.DeploymentPath + if deploymentPath == "" { + deploymentPath = defaultDeploymentPath + } + + // Manifests are optional so we will continue if the directory does not exist + if _, err := os.Stat(deploymentPath); os.IsNotExist(err) { + return nil, err + } + + task.SetProgress(NewServiceProgress("Applying k8s manifests")) + err := t.kubectl.Apply( + ctx, + filepath.Join(serviceConfig.RelativePath, deploymentPath), + nil, + ) + if err != nil { + return nil, fmt.Errorf("failed applying kube manifests: %w", err) + } + + deploymentName := serviceConfig.K8s.Deployment.Name + if deploymentName == "" { + deploymentName = serviceConfig.Name + } + + // It is not a requirement for a AZD deploy to contain a deployment object + // If we don't find any deployment within the namespace we will continue + task.SetProgress(NewServiceProgress("Verifying deployment")) + deployment, err := t.waitForDeployment(ctx, deploymentName) + if err != nil && !errors.Is(err, kubectl.ErrResourceNotFound) { + return nil, err + } + + return deployment, nil +} + +// deployKustomize deploys kustomize manifests to the k8s cluster +func (t *aksTarget) deployKustomize( + ctx context.Context, + serviceConfig *ServiceConfig, + task *async.TaskContextWithProgress[*ServiceDeployResult, ServiceProgress], +) (bool, error) { + if serviceConfig.K8s.Kustomize == nil { + return false, nil + } + + if !t.featureManager.IsEnabled(featureKustomize) { + return false, fmt.Errorf( + "Kustomize support is not enabled. 
Run '%s' to enable it.", + alpha.GetEnableCommand(featureKustomize), + ) + } + + task.SetProgress(NewServiceProgress("Applying k8s manifests with Kustomize")) + overlayPath, err := serviceConfig.K8s.Kustomize.Directory.Envsubst(t.env.Getenv) + if err != nil { + return false, fmt.Errorf("failed to envsubst kustomize directory: %w", err) + } + + // When deploying with kustomize we need to specify the full path to the kustomize directory. + // This can either be a base or overlay directory but must contain a kustomization.yaml file + kustomizeDir := filepath.Join(serviceConfig.Project.Path, serviceConfig.RelativePath, overlayPath) + if _, err := os.Stat(kustomizeDir); os.IsNotExist(err) { + return false, fmt.Errorf("kustomize directory '%s' does not exist: %w", kustomizeDir, err) + } + + // Kustomize does not have a built in way to specify environment variables + // A common well-known solution is to use the kustomize configMapGenerator within your kustomization.yaml + // and then generate a .env file that can be used to generate config maps + // azd can help here to create an .env file from the map specified within azure.yaml kustomize config section + if len(serviceConfig.K8s.Kustomize.Env) > 0 { + builder := strings.Builder{} + for key, exp := range serviceConfig.K8s.Kustomize.Env { + value, err := exp.Envsubst(t.env.Getenv) + if err != nil { + return false, fmt.Errorf("failed to envsubst kustomize env: %w", err) + } + + builder.WriteString(fmt.Sprintf("%s=%s\n", key, value)) + } + + // We are manually writing the .env file since k8s config maps expect unquoted values + // The godotenv library will quote values when writing the file without an option to disable + envFilePath := filepath.Join(kustomizeDir, ".env") + if err := os.WriteFile(envFilePath, []byte(builder.String()), osutil.PermissionFile); err != nil { + return false, fmt.Errorf("failed to write kustomize .env: %w", err) + } + + defer os.Remove(envFilePath) + } + + // Another common scenario is to use the kustomize edit commands to modify the kustomization.yaml + // configuration before applying the manifests. + // Common scenarios for this would be for modifying the images or namespace used for the deployment + for _, edit := range serviceConfig.K8s.Kustomize.Edits { + editArgs, err := edit.Envsubst(t.env.Getenv) + if err != nil { + return false, fmt.Errorf("failed to envsubst kustomize edit: %w", err) + } + + if err := t.kustomizeCli. + WithCwd(kustomizeDir). + Edit(ctx, strings.Split(editArgs, " ")...); err != nil { + return false, err + } + } + + // Finally apply manifests with kustomize using the -k flag + if err := t.kubectl.ApplyWithKustomize(ctx, kustomizeDir, nil); err != nil { + return false, err + } + + return true, nil +} + +// deployHelmCharts deploys helm charts to the k8s cluster +func (t *aksTarget) deployHelmCharts( + ctx context.Context, serviceConfig *ServiceConfig, + task *async.TaskContextWithProgress[*ServiceDeployResult, ServiceProgress], +) (bool, error) { + if serviceConfig.K8s.Helm == nil { + return false, nil + } + + if !t.featureManager.IsEnabled(featureHelm) { + return false, fmt.Errorf("Helm support is not enabled. 
Run '%s' to enable it.", alpha.GetEnableCommand(featureHelm)) + } + + for _, repo := range serviceConfig.K8s.Helm.Repositories { + task.SetProgress(NewServiceProgress(fmt.Sprintf("Configuring helm repo: %s", repo.Name))) + if err := t.helmCli.AddRepo(ctx, repo); err != nil { + return false, err + } + + if err := t.helmCli.UpdateRepo(ctx, repo.Name); err != nil { + return false, err + } + } + + for _, release := range serviceConfig.K8s.Helm.Releases { + if release.Namespace == "" { + release.Namespace = t.getK8sNamespace(serviceConfig) + } + + if err := t.ensureNamespace(ctx, release.Namespace); err != nil { + return false, err + } + + task.SetProgress(NewServiceProgress(fmt.Sprintf("Installing helm release: %s", release.Name))) + if err := t.helmCli.Upgrade(ctx, release); err != nil { + return false, err + } + + task.SetProgress(NewServiceProgress(fmt.Sprintf("Checking helm release status: %s", release.Name))) + err := retry.Do( + ctx, + retry.WithMaxDuration(10*time.Minute, retry.NewConstant(5*time.Second)), + func(ctx context.Context) error { + status, err := t.helmCli.Status(ctx, release) + if err != nil { + return err + } + + if status.Info.Status != helm.StatusKindDeployed { + fmt.Printf("Status: %s\n", status.Info.Status) + return retry.RetryableError( + fmt.Errorf("helm release '%s' is not ready, %w", release.Name, err), + ) + } + + return nil + }, + ) + + if err != nil { + return false, err + } + } + + return true, nil +} + // Gets the service endpoints for the AKS service target func (t *aksTarget) Endpoints( ctx context.Context, @@ -507,11 +749,21 @@ func (t *aksTarget) getServiceEndpoints( var endpoints []string if service.Spec.Type == kubectl.ServiceTypeLoadBalancer { for _, resource := range service.Status.LoadBalancer.Ingress { - endpoints = append(endpoints, fmt.Sprintf("http://%s, (Service, Type: LoadBalancer)", resource.Ip)) + endpoints = append( + endpoints, + fmt.Sprintf("http://%s, (Service: %s, Type: LoadBalancer)", resource.Ip, service.Metadata.Name), + ) } } else if service.Spec.Type == kubectl.ServiceTypeClusterIp { for index, ip := range service.Spec.ClusterIps { - endpoints = append(endpoints, fmt.Sprintf("http://%s:%d, (Service, Type: ClusterIP)", ip, service.Spec.Ports[index].Port)) + endpoints = append( + endpoints, + fmt.Sprintf("http://%s:%d, (Service: %s, Type: ClusterIP)", + ip, + service.Spec.Ports[index].Port, + service.Metadata.Name, + ), + ) } } diff --git a/cli/azd/pkg/project/service_target_aks_test.go b/cli/azd/pkg/project/service_target_aks_test.go index d370e94add6..761e74169c4 100644 --- a/cli/azd/pkg/project/service_target_aks_test.go +++ b/cli/azd/pkg/project/service_target_aks_test.go @@ -14,12 +14,16 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2" + "github.com/azure/azure-dev/cli/azd/pkg/alpha" "github.com/azure/azure-dev/cli/azd/pkg/async" + "github.com/azure/azure-dev/cli/azd/pkg/config" "github.com/azure/azure-dev/cli/azd/pkg/convert" "github.com/azure/azure-dev/cli/azd/pkg/environment" "github.com/azure/azure-dev/cli/azd/pkg/exec" + "github.com/azure/azure-dev/cli/azd/pkg/helm" "github.com/azure/azure-dev/cli/azd/pkg/infra" "github.com/azure/azure-dev/cli/azd/pkg/kubelogin" + "github.com/azure/azure-dev/cli/azd/pkg/kustomize" "github.com/azure/azure-dev/cli/azd/pkg/osutil" "github.com/azure/azure-dev/cli/azd/pkg/tools/azcli" 
"github.com/azure/azure-dev/cli/azd/pkg/tools/docker" @@ -43,7 +47,7 @@ func Test_NewAksTarget(t *testing.T) { serviceConfig := createTestServiceConfig("./src/api", AksTarget, ServiceLanguageTypeScript) env := createEnv() - serviceTarget := createAksServiceTarget(mockContext, serviceConfig, env) + serviceTarget := createAksServiceTarget(mockContext, serviceConfig, env, nil) require.NotNil(t, serviceTarget) require.NotNil(t, serviceConfig) @@ -60,7 +64,8 @@ func Test_Required_Tools(t *testing.T) { serviceConfig := createTestServiceConfig(tempDir, AksTarget, ServiceLanguageTypeScript) env := createEnv() - serviceTarget := createAksServiceTarget(mockContext, serviceConfig, env) + userConfig := config.NewConfig(nil) + serviceTarget := createAksServiceTarget(mockContext, serviceConfig, env, userConfig) requiredTools := serviceTarget.RequiredExternalTools(*mockContext.Context) require.Len(t, requiredTools, 2) @@ -68,6 +73,30 @@ func Test_Required_Tools(t *testing.T) { require.Implements(t, new(kubectl.KubectlCli), requiredTools[1]) } +func Test_Required_Tools_WithAlpha(t *testing.T) { + tempDir := t.TempDir() + ostest.Chdir(t, tempDir) + + mockContext := mocks.NewMockContext(context.Background()) + err := setupMocksForAksTarget(mockContext) + require.NoError(t, err) + + serviceConfig := createTestServiceConfig(tempDir, AksTarget, ServiceLanguageTypeScript) + env := createEnv() + + userConfig := config.NewConfig(nil) + _ = userConfig.Set("alpha.aks.helm", "on") + _ = userConfig.Set("alpha.aks.kustomize", "on") + serviceTarget := createAksServiceTarget(mockContext, serviceConfig, env, userConfig) + + requiredTools := serviceTarget.RequiredExternalTools(*mockContext.Context) + require.Len(t, requiredTools, 4) + require.Implements(t, new(docker.Docker), requiredTools[0]) + require.Implements(t, new(kubectl.KubectlCli), requiredTools[1]) + require.IsType(t, &helm.Cli{}, requiredTools[2]) + require.IsType(t, &kustomize.Cli{}, requiredTools[3]) +} + func Test_Package_Deploy_HappyPath(t *testing.T) { tempDir := t.TempDir() ostest.Chdir(t, tempDir) @@ -79,7 +108,7 @@ func Test_Package_Deploy_HappyPath(t *testing.T) { serviceConfig := createTestServiceConfig(tempDir, AksTarget, ServiceLanguageTypeScript) env := createEnv() - serviceTarget := createAksServiceTarget(mockContext, serviceConfig, env) + serviceTarget := createAksServiceTarget(mockContext, serviceConfig, env, nil) err = simulateInitliaze(*mockContext.Context, serviceTarget, serviceConfig) require.NoError(t, err) @@ -133,7 +162,7 @@ func Test_Resolve_Cluster_Name(t *testing.T) { serviceConfig := createTestServiceConfig(tempDir, AksTarget, ServiceLanguageTypeScript) env := createEnv() - serviceTarget := createAksServiceTarget(mockContext, serviceConfig, env) + serviceTarget := createAksServiceTarget(mockContext, serviceConfig, env, nil) err = simulateInitliaze(*mockContext.Context, serviceTarget, serviceConfig) require.NoError(t, err) }) @@ -144,13 +173,13 @@ func Test_Resolve_Cluster_Name(t *testing.T) { require.NoError(t, err) serviceConfig := createTestServiceConfig(tempDir, AksTarget, ServiceLanguageTypeScript) - serviceConfig.ResourceName = NewExpandableString("AKS_CLUSTER") + serviceConfig.ResourceName = osutil.NewExpandableString("AKS_CLUSTER") env := createEnv() // Remove default AKS cluster name from env file env.DotenvDelete(environment.AksClusterEnvVarName) - serviceTarget := createAksServiceTarget(mockContext, serviceConfig, env) + serviceTarget := createAksServiceTarget(mockContext, serviceConfig, env, nil) err = 
simulateInitliaze(*mockContext.Context, serviceTarget, serviceConfig) require.NoError(t, err) }) @@ -161,14 +190,14 @@ func Test_Resolve_Cluster_Name(t *testing.T) { require.NoError(t, err) serviceConfig := createTestServiceConfig(tempDir, AksTarget, ServiceLanguageTypeScript) - serviceConfig.ResourceName = NewExpandableString("${MY_CUSTOM_ENV_VAR}") + serviceConfig.ResourceName = osutil.NewExpandableString("${MY_CUSTOM_ENV_VAR}") env := createEnv() env.DotenvSet("MY_CUSTOM_ENV_VAR", "AKS_CLUSTER") // Remove default AKS cluster name from env file env.DotenvDelete(environment.AksClusterEnvVarName) - serviceTarget := createAksServiceTarget(mockContext, serviceConfig, env) + serviceTarget := createAksServiceTarget(mockContext, serviceConfig, env, nil) err = simulateInitliaze(*mockContext.Context, serviceTarget, serviceConfig) require.NoError(t, err) }) @@ -187,7 +216,7 @@ func Test_Resolve_Cluster_Name(t *testing.T) { // Simulate AKS cluster name not found in env file env.DotenvDelete(environment.AksClusterEnvVarName) - serviceTarget := createAksServiceTarget(mockContext, serviceConfig, env) + serviceTarget := createAksServiceTarget(mockContext, serviceConfig, env, nil) err = simulateInitliaze(*mockContext.Context, serviceTarget, serviceConfig) require.Error(t, err) require.ErrorContains(t, err, "could not determine AKS cluster") @@ -210,12 +239,145 @@ func Test_Deploy_No_Credentials(t *testing.T) { serviceConfig := createTestServiceConfig(tempDir, AksTarget, ServiceLanguageTypeScript) env := createEnv() - serviceTarget := createAksServiceTarget(mockContext, serviceConfig, env) + serviceTarget := createAksServiceTarget(mockContext, serviceConfig, env, nil) err = simulateInitliaze(*mockContext.Context, serviceTarget, serviceConfig) require.Error(t, err) require.ErrorContains(t, err, "failed retrieving cluster user credentials") } +func Test_Deploy_Helm(t *testing.T) { + tempDir := t.TempDir() + ostest.Chdir(t, tempDir) + + mockContext := mocks.NewMockContext(context.Background()) + err := setupMocksForAksTarget(mockContext) + require.NoError(t, err) + + mockResults, err := setupMocksForHelm(mockContext) + require.NoError(t, err) + + serviceConfig := *createTestServiceConfig(tempDir, AksTarget, ServiceLanguageTypeScript) + serviceConfig.RelativePath = "" + serviceConfig.K8s.Helm = &helm.Config{ + Repositories: []*helm.Repository{ + { + Name: "argo", + Url: "https://argoproj.github.io/argo-helm", + }, + }, + Releases: []*helm.Release{ + { + Name: "argocd", + Chart: "argo/argo-cd", + Version: "5.51.4", + }, + }, + } + + env := createEnv() + userConfig := config.NewConfig(nil) + _ = userConfig.Set("alpha.aks.helm", "on") + + serviceTarget := createAksServiceTarget(mockContext, &serviceConfig, env, userConfig) + err = simulateInitliaze(*mockContext.Context, serviceTarget, &serviceConfig) + require.NoError(t, err) + + packageResult := &ServicePackageResult{ + PackagePath: "test-app/api-test:azd-deploy-0", + Details: &dockerPackageResult{ + ImageHash: "IMAGE_HASH", + ImageTag: "test-app/api-test:azd-deploy-0", + }, + } + + scope := environment.NewTargetResource("SUB_ID", "RG_ID", "", string(infra.AzureResourceTypeManagedCluster)) + deployTask := serviceTarget.Deploy(*mockContext.Context, &serviceConfig, packageResult, scope) + logProgress(deployTask) + deployResult, err := deployTask.Await() + + require.NoError(t, err) + require.NotNil(t, deployResult) + + repoAdd, repoAddCalled := mockResults["helm-repo-add"] + require.True(t, repoAddCalled) + require.Equal(t, []string{"repo", "add", "argo", 
"https://argoproj.github.io/argo-helm"}, repoAdd.Args) + + repoUpdate, repoUpdateCalled := mockResults["helm-repo-update"] + require.True(t, repoUpdateCalled) + require.Equal(t, []string{"repo", "update", "argo"}, repoUpdate.Args) + + helmUpgrade, helmUpgradeCalled := mockResults["helm-upgrade"] + require.True(t, helmUpgradeCalled) + require.Contains(t, strings.Join(helmUpgrade.Args, " "), "upgrade argocd argo/argo-cd") + + helmStatus, helmStatusCalled := mockResults["helm-status"] + require.True(t, helmStatusCalled) + require.Contains(t, strings.Join(helmStatus.Args, " "), "status argocd") +} + +func Test_Deploy_Kustomize(t *testing.T) { + tempDir := t.TempDir() + ostest.Chdir(t, tempDir) + + mockContext := mocks.NewMockContext(context.Background()) + err := setupMocksForAksTarget(mockContext) + require.NoError(t, err) + + mockResults, err := setupMocksForKustomize(mockContext) + require.NoError(t, err) + + serviceConfig := *createTestServiceConfig(tempDir, AksTarget, ServiceLanguageTypeScript) + serviceConfig.RelativePath = "" + serviceConfig.K8s.Kustomize = &kustomize.Config{ + Directory: osutil.NewExpandableString("./kustomize/overlays/dev"), + Edits: []osutil.ExpandableString{ + osutil.NewExpandableString("set image todo-api=${SERVICE_API_IMAGE_NAME}"), + }, + } + + err = os.MkdirAll(filepath.Join(tempDir, "./kustomize/overlays/dev"), osutil.PermissionDirectory) + require.NoError(t, err) + + env := createEnv() + env.DotenvSet("SERVICE_API_IMAGE_NAME", "REGISTRY.azurecr.io/test-app/api-test:azd-deploy-0") + + userConfig := config.NewConfig(nil) + _ = userConfig.Set("alpha.aks.kustomize", "on") + + serviceTarget := createAksServiceTarget(mockContext, &serviceConfig, env, userConfig) + err = simulateInitliaze(*mockContext.Context, serviceTarget, &serviceConfig) + require.NoError(t, err) + + packageResult := &ServicePackageResult{ + PackagePath: "test-app/api-test:azd-deploy-0", + Details: &dockerPackageResult{ + ImageHash: "IMAGE_HASH", + ImageTag: "test-app/api-test:azd-deploy-0", + }, + } + + scope := environment.NewTargetResource("SUB_ID", "RG_ID", "", string(infra.AzureResourceTypeManagedCluster)) + deployTask := serviceTarget.Deploy(*mockContext.Context, &serviceConfig, packageResult, scope) + logProgress(deployTask) + deployResult, err := deployTask.Await() + + require.NoError(t, err) + require.NotNil(t, deployResult) + + kustomizeEdit, kustomizeEditCalled := mockResults["kustomize-edit"] + require.True(t, kustomizeEditCalled) + require.Equal(t, []string{ + "edit", + "set", + "image", + "todo-api=REGISTRY.azurecr.io/test-app/api-test:azd-deploy-0", + }, kustomizeEdit.Args) + + kubectlApplyKustomize, kubectlApplyKustomizeCalled := mockResults["kubectl-apply-kustomize"] + require.True(t, kubectlApplyKustomizeCalled) + require.Equal(t, []string{"apply", "-k", filepath.FromSlash("kustomize/overlays/dev")}, kubectlApplyKustomize.Args) +} + func setupK8sManifests(t *testing.T, serviceConfig *ServiceConfig) error { manifestsDir := filepath.Join(serviceConfig.RelativePath, defaultDeploymentPath) err := os.MkdirAll(manifestsDir, osutil.PermissionDirectory) @@ -231,6 +393,65 @@ func setupK8sManifests(t *testing.T, serviceConfig *ServiceConfig) error { return nil } +func setupMocksForHelm(mockContext *mocks.MockContext) (map[string]exec.RunArgs, error) { + result := map[string]exec.RunArgs{} + + mockContext.CommandRunner.When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "helm repo add") + }).RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + 
result["helm-repo-add"] = args + return exec.NewRunResult(0, "", ""), nil + }) + + mockContext.CommandRunner.When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "helm repo update") + }).RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + result["helm-repo-update"] = args + return exec.NewRunResult(0, "", ""), nil + }) + + mockContext.CommandRunner.When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "helm upgrade") + }).RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + result["helm-upgrade"] = args + return exec.NewRunResult(0, "", ""), nil + }) + + mockContext.CommandRunner.When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "helm status") + }).RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + result["helm-status"] = args + statusResult := `{ + "info": { + "status": "deployed" + } + }` + return exec.NewRunResult(0, statusResult, ""), nil + }) + + return result, nil +} + +func setupMocksForKustomize(mockContext *mocks.MockContext) (map[string]exec.RunArgs, error) { + result := map[string]exec.RunArgs{} + + mockContext.CommandRunner.When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "kustomize edit") + }).RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + result["kustomize-edit"] = args + return exec.NewRunResult(0, "", ""), nil + }) + + mockContext.CommandRunner.When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "kubectl apply -k") + }).RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + result["kubectl-apply-kustomize"] = args + return exec.NewRunResult(0, "", ""), nil + }) + + return result, nil +} + func setupMocksForAksTarget(mockContext *mocks.MockContext) error { err := setupListClusterAdminCredentialsMock(mockContext, http.StatusOK) if err != nil { @@ -577,8 +798,11 @@ func createAksServiceTarget( mockContext *mocks.MockContext, serviceConfig *ServiceConfig, env *environment.Environment, + userConfig config.Config, ) ServiceTarget { kubeCtl := kubectl.NewKubectl(mockContext.CommandRunner) + helmCli := helm.NewCli(mockContext.CommandRunner) + kustomizeCli := kustomize.NewCli(mockContext.CommandRunner) dockerCli := docker.NewDocker(mockContext.CommandRunner) kubeLoginCli := kubelogin.NewCli(mockContext.CommandRunner) credentialProvider := mockaccount.SubscriptionCredentialProviderFunc( @@ -604,6 +828,13 @@ func createAksServiceTarget( containerRegistryService := azcli.NewContainerRegistryService(credentialProvider, mockContext.HttpClient, dockerCli) containerHelper := NewContainerHelper(env, envManager, clock.NewMock(), containerRegistryService, dockerCli) + if userConfig == nil { + userConfig = config.NewConfig(nil) + } + + configManager := &mockUserConfigManager{} + configManager.On("Load").Return(userConfig, nil) + return NewAksTarget( env, envManager, @@ -612,7 +843,10 @@ func createAksServiceTarget( resourceManager, kubeCtl, kubeLoginCli, + helmCli, + kustomizeCli, containerHelper, + alpha.NewFeaturesManagerWithConfig(userConfig), ) } @@ -714,3 +948,17 @@ func (m *MockResourceManager) GetTargetResource( args := m.Called(ctx, subscriptionId, serviceConfig) return args.Get(0).(*environment.TargetResource), args.Error(1) } + +type mockUserConfigManager struct { + mock.Mock +} + +func (m *mockUserConfigManager) Load() (config.Config, error) { + args := m.Called() + return args.Get(0).(config.Config), args.Error(1) +} + +func (m *mockUserConfigManager) 
Save(config config.Config) error { + args := m.Called(config) + return args.Error(0) +} diff --git a/cli/azd/pkg/tools/kubectl/kubectl.go b/cli/azd/pkg/tools/kubectl/kubectl.go index a4d8e44ab09..a8ab1e0cf29 100644 --- a/cli/azd/pkg/tools/kubectl/kubectl.go +++ b/cli/azd/pkg/tools/kubectl/kubectl.go @@ -39,6 +39,8 @@ type KubectlCli interface { Exec(ctx context.Context, flags *KubeCliFlags, args ...string) (exec.RunResult, error) // Gets the deployment rollout status RolloutStatus(ctx context.Context, deploymentName string, flags *KubeCliFlags) (*exec.RunResult, error) + // Applies the manifests at the specified path using kustomize + ApplyWithKustomize(ctx context.Context, path string, flags *KubeCliFlags) error } type OutputType string @@ -228,6 +230,18 @@ func (cli *kubectlCli) Apply(ctx context.Context, path string, flags *KubeCliFla return nil } +// Applies the manifests at the specified path using kustomize +func (cli *kubectlCli) ApplyWithKustomize(ctx context.Context, path string, flags *KubeCliFlags) error { + runArgs := exec.NewRunArgs("kubectl", "apply", "-k", path) + + _, err := cli.executeCommandWithArgs(ctx, runArgs, flags) + if err != nil { + return fmt.Errorf("failed running kubectl apply -k: %w", err) + } + + return nil +} + // Creates a new k8s namespace with the specified name func (cli *kubectlCli) CreateNamespace(ctx context.Context, name string, flags *KubeCliFlags) (*exec.RunResult, error) { args := []string{"create", "namespace", name} diff --git a/cli/azd/resources/alpha_features.yaml b/cli/azd/resources/alpha_features.yaml index 074fcfff621..70080fcce70 100644 --- a/cli/azd/resources/alpha_features.yaml +++ b/cli/azd/resources/alpha_features.yaml @@ -1,4 +1,8 @@ - id: resourceGroupDeployments description: "Support infrastructure deployments at resource group scope." - id: infraSynth - description: "Enable the `infra synth` command to write generated infrastructure to disk." \ No newline at end of file + description: "Enable the `infra synth` command to write generated infrastructure to disk." +- id: aks.helm + description: "Enable Helm support for AKS deployments." +- id: aks.kustomize + description: "Enable Kustomize support for AKS deployments." \ No newline at end of file diff --git a/schemas/alpha/azure.yaml.json b/schemas/alpha/azure.yaml.json index dd25b867b40..6cc6fb2831a 100644 --- a/schemas/alpha/azure.yaml.json +++ b/schemas/alpha/azure.yaml.json @@ -617,6 +617,107 @@ "description": "When set will be appended to the root of your ingress resource path." } } + }, + "helm": { + "type": "object", + "title": "Optional. The helm configuration", + "additionalProperties": false, + "properties": { + "repositories": { + "type": "array", + "title": "Optional. The helm repositories to add", + "description": "When set will add the helm repositories to the helm client.", + "minItems": 1, + "items": { + "type": "object", + "additionalProperties": false, + "required": [ + "name", + "url" + ], + "properties": { + "name": { + "type": "string", + "title": "The name of the helm repository", + "description": "The name of the helm repository to add." + }, + "url": { + "type": "string", + "title": "The url of the helm repository", + "description": "The url of the helm repository to add." + } + } + } + }, + "releases": { + "type": "array", + "title": "Optional. 
The helm releases to install", + "description": "When set will install the helm releases to the k8s cluster.", + "minItems": 1, + "items": { + "type": "object", + "additionalProperties": false, + "required": [ + "name", + "chart" + ], + "properties": { + "name": { + "type": "string", + "title": "The name of the helm release", + "description": "The name of the helm release to install." + }, + "chart": { + "type": "string", + "title": "The name of the helm chart", + "description": "The name of the helm chart to install." + }, + "version": { + "type": "string", + "title": "The version of the helm chart", + "description": "The version of the helm chart to install." + }, + "values": { + "type": "string", + "title": "Optional. Relative path from service to a values.yaml to pass to the helm chart", + "description": "When set will pass the values to the helm chart." + } + } + } + } + } + }, + "kustomize": { + "type": "object", + "title": "Optional. The kustomize configuration", + "additionalProperties": false, + "properties": { + "dir": { + "type": "string", + "title": "Optional. The relative path to the kustomize directory.", + "description": "When set will use the kustomize directory to deploy to the k8s cluster. Supports environment variable substitution." + }, + "edits": { + "type": "array", + "title": "Optional. The kustomize edits to apply before deployment.", + "description": "When set will apply the edits to the kustomize directory before deployment. Supports environment variable substitution.", + "items": { + "type": "string" + } + }, + "env": { + "type": "object", + "title": "Optional. The environment key/value pairs used to generate a .env file.", + "description": "When set will generate a .env file in the kustomize directory. Values support environment variable substitution.", + "additionalProperties": { + "type": [ + "string", + "boolean", + "number" + ] + } + } + } } } },
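For reference, a minimal sketch of an azure.yaml fragment that exercises the new helm and kustomize configuration described by this schema. It is illustrative only: the service name, values file path, and overlay directory are hypothetical; the nesting under `k8s` is inferred from the schema context above and from the `serviceConfig.K8s.Helm` / `serviceConfig.K8s.Kustomize` fields used in the tests; the argo repository and release mirror `Test_Deploy_Helm`. Both features are gated behind alpha flags and would be enabled through the usual azd config flow, e.g. `azd config set alpha.aks.helm on` and `azd config set alpha.aks.kustomize on`, matching the `alpha.aks.*` keys set in the tests.

services:
  api:
    project: ./src/api
    host: aks
    k8s:
      helm:
        repositories:
          - name: argo
            url: https://argoproj.github.io/argo-helm
        releases:
          - name: argocd
            chart: argo/argo-cd
            version: "5.51.4"
            values: ./values/argocd-values.yaml   # hypothetical values file, relative to the service
      kustomize:
        dir: ./kustomize/overlays/${AZURE_ENV_NAME}   # hypothetical overlay path; supports env substitution
        edits:
          - set image todo-api=${SERVICE_API_IMAGE_NAME}
        env:
          AZURE_ENV_NAME: ${AZURE_ENV_NAME}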