diff --git a/.vscode/cspell.global.yaml b/.vscode/cspell.global.yaml index 4ede35babbb..e39b7b1f7ba 100644 --- a/.vscode/cspell.global.yaml +++ b/.vscode/cspell.global.yaml @@ -24,6 +24,7 @@ ignoreWords: - armauthorization - armappcontainers - armappservice + - armcontainerservice - armkeyvault - armresources - armruntime @@ -34,6 +35,7 @@ ignoreWords: - Azdo - aztfmod - azurecaf + - azurecr - azuredevops - azurerm - armcontainerregistry @@ -48,6 +50,7 @@ ignoreWords: - configlist - conjunction - containerregistry + - containerservice - databricks - dedb - devcontainer @@ -73,6 +76,9 @@ ignoreWords: - JOBOBJECT - kubernetes - kusto + - kubeconfig + - kubeconfigs + - kustomize - magefile - mainfic - menuid @@ -156,6 +162,7 @@ ignoreWords: - menuid - PLACEHOLDERIACTOOLS - LOGANALYTICS + - webapprouting - zipdeploy - appinsights useGitignore: true diff --git a/.vscode/launch.json b/.vscode/launch.json index 76ff9e18248..4c54285757e 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -1,15 +1,15 @@ { - // Use IntelliSense to learn about possible attributes. - // Hover to view descriptions of existing attributes. - // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 - "version": "0.2.0", - "configurations": [ - { - "name": "Attach to Process", - "type": "go", - "request": "attach", - "mode": "local", - "processId": "${command:pickGoProcess}" - } - ] -} + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. 
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "name": "Attach to Process", + "type": "go", + "request": "attach", + "mode": "local", + "processId": "${command:pickGoProcess}" + } + ] +} \ No newline at end of file diff --git a/cli/azd/cmd/container.go b/cli/azd/cmd/container.go index 4954154b5e3..56be47ff101 100644 --- a/cli/azd/cmd/container.go +++ b/cli/azd/cmd/container.go @@ -132,7 +132,9 @@ func registerCommonDependencies(container *ioc.NestedContainer) { credProvider auth.MultiTenantCredentialProvider) (azcore.TokenCredential, error) { if env == nil { //nolint:lll - panic("command asked for azcore.TokenCredential, but prerequisite dependency environment.Environment was not registered.") + panic( + "command asked for azcore.TokenCredential, but prerequisite dependency environment. Environment was not registered.", + ) } subscriptionId := env.GetSubscriptionId() diff --git a/cli/azd/pkg/account/manager_test.go b/cli/azd/pkg/account/manager_test.go index bdb7ec1117d..cc240e992e4 100644 --- a/cli/azd/pkg/account/manager_test.go +++ b/cli/azd/pkg/account/manager_test.go @@ -111,7 +111,11 @@ func Test_GetAccountDefaults(t *testing.T) { require.NoError(t, err) accountDefaults, err := manager.GetAccountDefaults(context.Background()) - require.Equal(t, &Account{DefaultSubscription: (*Subscription)(nil), DefaultLocation: (&defaultLocation)}, accountDefaults) + require.Equal( + t, + &Account{DefaultSubscription: (*Subscription)(nil), DefaultLocation: (&defaultLocation)}, + accountDefaults, + ) require.NoError(t, err) }) diff --git a/cli/azd/pkg/azure/resource_ids.go b/cli/azd/pkg/azure/resource_ids.go index c5b056adf66..af680615f6e 100644 --- a/cli/azd/pkg/azure/resource_ids.go +++ b/cli/azd/pkg/azure/resource_ids.go @@ -51,6 +51,15 @@ func WebsiteRID(subscriptionId, resourceGroupName, websiteName string) string { return returnValue } +func AksRID(subscriptionId, resourceGroupName, 
clusterName string) string { + returnValue := fmt.Sprintf( + "%s/providers/Microsoft.ContainerService/managedClusters/%s", + ResourceGroupRID(subscriptionId, resourceGroupName), + clusterName, + ) + return returnValue +} + func ContainerAppRID(subscriptionId, resourceGroupName, containerAppName string) string { returnValue := fmt.Sprintf( "%s/providers/Microsoft.App/containerApps/%s", @@ -60,6 +69,15 @@ func ContainerAppRID(subscriptionId, resourceGroupName, containerAppName string) return returnValue } +func KubernetesServiceRID(subscriptionId, resourceGroupName, clusterName string) string { + returnValue := fmt.Sprintf( + "%s/providers/Microsoft.ContainerService/managedClusters/%s", + ResourceGroupRID(subscriptionId, resourceGroupName), + clusterName, + ) + return returnValue +} + func StaticWebAppRID(subscriptionId, resourceGroupName, staticSiteName string) string { returnValue := fmt.Sprintf( "%s/providers/Microsoft.Web/staticSites/%s", diff --git a/cli/azd/pkg/environment/environment.go b/cli/azd/pkg/environment/environment.go index a07d31cf8fd..994d4d2222b 100644 --- a/cli/azd/pkg/environment/environment.go +++ b/cli/azd/pkg/environment/environment.go @@ -36,6 +36,9 @@ const TenantIdEnvVarName = "AZURE_TENANT_ID" // to. const ContainerRegistryEndpointEnvVarName = "AZURE_CONTAINER_REGISTRY_ENDPOINT" +// AksClusterEnvVarName is the name of they key used to store the endpoint of the AKS cluster to push to. 
+const AksClusterEnvVarName = "AZURE_AKS_CLUSTER_NAME" + // ResourceGroupEnvVarName is the name of the azure resource group that should be used for deployments const ResourceGroupEnvVarName = "AZURE_RESOURCE_GROUP" diff --git a/cli/azd/pkg/exec/command_runner.go b/cli/azd/pkg/exec/command_runner.go index bd4961e8d12..585b331ba65 100644 --- a/cli/azd/pkg/exec/command_runner.go +++ b/cli/azd/pkg/exec/command_runner.go @@ -58,7 +58,14 @@ func (r *commandRunner) Run(ctx context.Context, args RunArgs) (RunResult, error cmd.Dir = args.Cwd - var stdin, stdout, stderr bytes.Buffer + var stdin io.Reader + if args.StdIn != nil { + stdin = args.StdIn + } else { + stdin = new(bytes.Buffer) + } + + var stdout, stderr bytes.Buffer cmd.Env = appendEnv(args.Env) @@ -67,7 +74,7 @@ func (r *commandRunner) Run(ctx context.Context, args RunArgs) (RunResult, error cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr } else { - cmd.Stdin = &stdin + cmd.Stdin = stdin cmd.Stdout = &stdout cmd.Stderr = &stderr diff --git a/cli/azd/pkg/exec/runargs.go b/cli/azd/pkg/exec/runargs.go index 7c121c5fbe3..ee370a14bdf 100644 --- a/cli/azd/pkg/exec/runargs.go +++ b/cli/azd/pkg/exec/runargs.go @@ -29,6 +29,9 @@ type RunArgs struct { // When set will attach commands to std input/output Interactive bool + + // When set will call the command with the specified StdIn + StdIn io.Reader } // NewRunArgs creates a new instance with the specified cmd and args @@ -81,3 +84,8 @@ func (b RunArgs) WithDebug(debug bool) RunArgs { b.Debug = debug return b } + +func (b RunArgs) WithStdIn(stdIn io.Reader) RunArgs { + b.StdIn = stdIn + return b +} diff --git a/cli/azd/pkg/infra/azure_resource_types.go b/cli/azd/pkg/infra/azure_resource_types.go index c372a3ef5d2..a5d9b2d4cba 100644 --- a/cli/azd/pkg/infra/azure_resource_types.go +++ b/cli/azd/pkg/infra/azure_resource_types.go @@ -25,7 +25,10 @@ const ( AzureResourceTypeResourceGroup AzureResourceType = "Microsoft.Resources/resourceGroups" AzureResourceTypeStorageAccount 
AzureResourceType = "Microsoft.Storage/storageAccounts" AzureResourceTypeStaticWebSite AzureResourceType = "Microsoft.Web/staticSites" + AzureResourceTypeContainerRegistry AzureResourceType = "Microsoft.ContainerRegistry/registries" + AzureResourceTypeManagedCluster AzureResourceType = "Microsoft.ContainerService/managedClusters" AzureResourceTypeServicePlan AzureResourceType = "Microsoft.Web/serverfarms" + AzureResourceTypeAgentPool AzureResourceType = "Microsoft.ContainerService/managedClusters/agentPools" AzureResourceTypeSqlServer AzureResourceType = "Microsoft.Sql/servers" AzureResourceTypeVirtualNetwork AzureResourceType = "Microsoft.Network/virtualNetworks" AzureResourceTypeWebSite AzureResourceType = "Microsoft.Web/sites" @@ -78,6 +81,12 @@ func GetResourceTypeDisplayName(resourceType AzureResourceType) string { return "Load Tests" case AzureResourceTypeVirtualNetwork: return "Virtual Network" + case AzureResourceTypeContainerRegistry: + return "Container Registry" + case AzureResourceTypeManagedCluster: + return "AKS Managed Cluster" + case AzureResourceTypeAgentPool: + return "AKS Agent Pool" } return "" diff --git a/cli/azd/pkg/project/framework_service_docker.go b/cli/azd/pkg/project/framework_service_docker.go index 5b3a51a51c7..73ff59d4c3c 100644 --- a/cli/azd/pkg/project/framework_service_docker.go +++ b/cli/azd/pkg/project/framework_service_docker.go @@ -23,7 +23,7 @@ type DockerProjectOptions struct { type dockerProject struct { config *ServiceConfig env *environment.Environment - docker *docker.Docker + docker docker.Docker framework FrameworkService } @@ -66,7 +66,7 @@ func (p *dockerProject) Initialize(ctx context.Context) error { func NewDockerProject( config *ServiceConfig, env *environment.Environment, - docker *docker.Docker, + docker docker.Docker, framework FrameworkService, ) FrameworkService { return &dockerProject{ diff --git a/cli/azd/pkg/project/service_config.go b/cli/azd/pkg/project/service_config.go index 5a5e3526117..eeba705b81e 
100644 --- a/cli/azd/pkg/project/service_config.go +++ b/cli/azd/pkg/project/service_config.go @@ -17,6 +17,7 @@ import ( "github.com/azure/azure-dev/cli/azd/pkg/tools" "github.com/azure/azure-dev/cli/azd/pkg/tools/azcli" "github.com/azure/azure-dev/cli/azd/pkg/tools/docker" + "github.com/azure/azure-dev/cli/azd/pkg/tools/kubectl" "github.com/azure/azure-dev/cli/azd/pkg/tools/swa" ) @@ -39,6 +40,8 @@ type ServiceConfig struct { Module string `yaml:"module"` // The optional docker options Docker DockerProjectOptions `yaml:"docker"` + // The optional K8S / AKS options + K8s AksOptions `yaml:"k8s"` // The infrastructure provisioning configuration Infra provisioning.Options `yaml:"infra"` // Hook configuration for service @@ -124,6 +127,20 @@ func (sc *ServiceConfig) GetServiceTarget( target, err = NewFunctionAppTarget(sc, env, resource, azCli) case string(StaticWebAppTarget): target, err = NewStaticWebAppTarget(sc, env, resource, azCli, swa.NewSwaCli(commandRunner)) + case string(AksTarget): + containerService, err := azCli.ContainerService(ctx, env.GetSubscriptionId()) + if err != nil { + return nil, err + } + target = NewAksTarget( + sc, + env, + resource, + azCli, + containerService, + kubectl.NewKubectl(commandRunner), + docker.NewDocker(commandRunner), + ) default: return nil, fmt.Errorf("unsupported host '%s' for service '%s'", sc.Host, sc.Name) } @@ -154,7 +171,7 @@ func (sc *ServiceConfig) GetFrameworkService( } // For containerized applications we use a nested framework service - if sc.Host == string(ContainerAppTarget) { + if sc.Host == string(ContainerAppTarget) || sc.Host == string(AksTarget) { sourceFramework := frameworkService frameworkService = NewDockerProject(sc, env, docker.NewDocker(commandRunner), sourceFramework) } diff --git a/cli/azd/pkg/project/service_target.go b/cli/azd/pkg/project/service_target.go index 71cecbd61f1..d38c4761304 100644 --- a/cli/azd/pkg/project/service_target.go +++ b/cli/azd/pkg/project/service_target.go @@ -20,6 +20,7 @@ 
const ( ContainerAppTarget ServiceTargetKind = "containerapp" AzureFunctionTarget ServiceTargetKind = "function" StaticWebAppTarget ServiceTargetKind = "staticwebapp" + AksTarget ServiceTargetKind = "aks" ) type ServiceDeploymentResult struct { @@ -89,10 +90,5 @@ func resourceTypeMismatchError( // As an example, ContainerAppTarget is able to provision the container app as part of deployment, // and thus returns true. func (st ServiceTargetKind) SupportsDelayedProvisioning() bool { - return st == ContainerAppTarget + return st == ContainerAppTarget || st == AksTarget } - -var _ ServiceTarget = &appServiceTarget{} -var _ ServiceTarget = &containerAppTarget{} -var _ ServiceTarget = &functionAppTarget{} -var _ ServiceTarget = &staticWebAppTarget{} diff --git a/cli/azd/pkg/project/service_target_aks.go b/cli/azd/pkg/project/service_target_aks.go new file mode 100644 index 00000000000..7dfec400fba --- /dev/null +++ b/cli/azd/pkg/project/service_target_aks.go @@ -0,0 +1,434 @@ +package project + +import ( + "context" + "errors" + "fmt" + "log" + "net/url" + "path/filepath" + "strings" + + "github.com/azure/azure-dev/cli/azd/pkg/azure" + "github.com/azure/azure-dev/cli/azd/pkg/environment" + "github.com/azure/azure-dev/cli/azd/pkg/environment/azdcontext" + "github.com/azure/azure-dev/cli/azd/pkg/tools" + "github.com/azure/azure-dev/cli/azd/pkg/tools/azcli" + "github.com/azure/azure-dev/cli/azd/pkg/tools/docker" + "github.com/azure/azure-dev/cli/azd/pkg/tools/kubectl" + "github.com/benbjohnson/clock" +) + +type AksOptions struct { + Namespace string `yaml:"namespace"` + Ingress AksIngressOptions `yaml:"ingress"` + Deployment AksDeploymentOptions `yaml:"deployment"` + Service AksServiceOptions `yaml:"service"` +} + +type AksIngressOptions struct { + Name string `yaml:"name"` + RelativePath string `yaml:"relativePath"` +} + +type AksDeploymentOptions struct { + Name string `yaml:"name"` +} + +type AksServiceOptions struct { + Name string `yaml:"name"` +} + +type aksTarget 
struct { + config *ServiceConfig + env *environment.Environment + scope *environment.TargetResource + containerService azcli.ContainerServiceClient + az azcli.AzCli + docker docker.Docker + kubectl kubectl.KubectlCli + clock clock.Clock +} + +func (t *aksTarget) RequiredExternalTools() []tools.ExternalTool { + return []tools.ExternalTool{t.docker} +} + +func (t *aksTarget) Deploy( + ctx context.Context, + azdCtx *azdcontext.AzdContext, + path string, + progress chan<- string, +) (ServiceDeploymentResult, error) { + // Login to AKS cluster + namespace := t.getK8sNamespace() + clusterName, has := t.env.Values[environment.AksClusterEnvVarName] + if !has { + return ServiceDeploymentResult{}, fmt.Errorf( + "could not determine AKS cluster, ensure %s is set as an output of your infrastructure", + environment.AksClusterEnvVarName, + ) + } + + // Login to container registry. + loginServer, has := t.env.Values[environment.ContainerRegistryEndpointEnvVarName] + if !has { + return ServiceDeploymentResult{}, fmt.Errorf( + "could not determine container registry endpoint, ensure %s is set as an output of your infrastructure", + environment.ContainerRegistryEndpointEnvVarName, + ) + } + + log.Printf("getting AKS credentials %s\n", clusterName) + progress <- "Getting AKS credentials" + credentials, err := t.containerService.GetAdminCredentials(ctx, t.scope.ResourceGroupName(), clusterName) + if err != nil { + return ServiceDeploymentResult{}, fmt.Errorf("failed retrieving cluster admin credentials, %w", err) + } + + kubeConfigManager, err := kubectl.NewKubeConfigManager(t.kubectl) + if err != nil { + return ServiceDeploymentResult{}, err + } + + kubeConfig, err := kubectl.ParseKubeConfig(ctx, credentials.Kubeconfigs[0].Value) + if err != nil { + return ServiceDeploymentResult{}, fmt.Errorf("failed parsing kube config: %w", err) + } + + if err := kubeConfigManager.SaveKubeConfig(ctx, clusterName, kubeConfig); err != nil { + return ServiceDeploymentResult{}, fmt.Errorf("failed 
saving kube config: %w", err) + } + + if err := kubeConfigManager.MergeConfigs(ctx, "config", "config", clusterName); err != nil { + return ServiceDeploymentResult{}, fmt.Errorf("failed merging kube configs: %w", err) + } + + if _, err := t.kubectl.ConfigUseContext(ctx, clusterName, nil); err != nil { + return ServiceDeploymentResult{}, fmt.Errorf("failed using kube context '%s', %w", clusterName, err) + } + + kubeFlags := kubectl.KubeCliFlags{ + Namespace: namespace, + DryRun: "client", + Output: "yaml", + } + + progress <- "Creating k8s namespace" + namespaceResult, err := t.kubectl.CreateNamespace( + ctx, + namespace, + &kubectl.KubeCliFlags{DryRun: "client", Output: "yaml"}, + ) + if err != nil { + return ServiceDeploymentResult{}, fmt.Errorf("failed creating kube namespace: %w", err) + } + + _, err = t.kubectl.ApplyPipe(ctx, namespaceResult.Stdout, nil) + if err != nil { + return ServiceDeploymentResult{}, fmt.Errorf("failed applying kube namespace: %w", err) + } + + progress <- "Creating k8s secrets" + secretResult, err := t.kubectl.CreateSecretGenericFromLiterals(ctx, "azd", t.env.Environ(), &kubeFlags) + if err != nil { + return ServiceDeploymentResult{}, fmt.Errorf("failed setting kube secrets: %w", err) + } + + _, err = t.kubectl.ApplyPipe(ctx, secretResult.Stdout, nil) + if err != nil { + return ServiceDeploymentResult{}, fmt.Errorf("failed applying kube secrets: %w", err) + } + + log.Printf("logging into registry %s\n", loginServer) + + progress <- "Logging into container registry" + if err := t.az.LoginAcr(ctx, t.docker, t.env.GetSubscriptionId(), loginServer); err != nil { + return ServiceDeploymentResult{}, fmt.Errorf("failed logging into registry '%s': %w", loginServer, err) + } + + imageTag, err := t.generateImageTag() + if err != nil { + return ServiceDeploymentResult{}, fmt.Errorf("failed generating image tag: %w", err) + } + + fullTag := fmt.Sprintf( + "%s/%s", + loginServer, + imageTag, + ) + + // Tag image. 
+ log.Printf("tagging image %s as %s", path, fullTag) + progress <- "Tagging image" + if err := t.docker.Tag(ctx, t.config.Path(), path, fullTag); err != nil { + return ServiceDeploymentResult{}, fmt.Errorf("failed tagging image: %w", err) + } + + log.Printf("pushing %s to registry", fullTag) + + // Push image. + progress <- "Pushing container image" + if err := t.docker.Push(ctx, t.config.Path(), fullTag); err != nil { + return ServiceDeploymentResult{}, fmt.Errorf("failed pushing image: %w", err) + } + + // Save the name of the image we pushed into the environment with a well known key. + t.env.SetServiceProperty(t.config.Name, "IMAGE_NAME", fullTag) + + if err := t.env.Save(); err != nil { + return ServiceDeploymentResult{}, fmt.Errorf("saving image name to environment: %w", err) + } + + progress <- "Applying k8s manifests" + t.kubectl.SetEnv(t.env.Values) + err = t.kubectl.ApplyFiles( + ctx, + filepath.Join(t.config.RelativePath, "manifests"), + &kubectl.KubeCliFlags{Namespace: namespace}, + ) + if err != nil { + return ServiceDeploymentResult{}, fmt.Errorf("failed applying kube manifests: %w", err) + } + + deploymentName := t.config.K8s.Deployment.Name + if deploymentName == "" { + deploymentName = t.config.Name + } + + // It is not a requirement for a AZD deploy to contain a deployment object + // If we don't find any deployment within the namespace we will continue + deployment, err := t.waitForDeployment(ctx, namespace, deploymentName) + if err != nil && !errors.Is(err, kubectl.ErrResourceNotFound) { + return ServiceDeploymentResult{}, err + } + + endpoints, err := t.Endpoints(ctx) + if err != nil { + return ServiceDeploymentResult{}, err + } + + return ServiceDeploymentResult{ + TargetResourceId: azure.KubernetesServiceRID( + t.env.GetSubscriptionId(), + t.scope.ResourceGroupName(), + t.scope.ResourceName(), + ), + Kind: AksTarget, + Details: deployment, + Endpoints: endpoints, + }, nil +} + +func (t *aksTarget) Endpoints(ctx context.Context) ([]string, 
error) { + namespace := t.getK8sNamespace() + + serviceName := t.config.K8s.Service.Name + if serviceName == "" { + serviceName = t.config.Name + } + + ingressName := t.config.K8s.Service.Name + if ingressName == "" { + ingressName = t.config.Name + } + + // Find endpoints for any matching services + // These endpoints would typically be internal cluster accessible endpoints + serviceEndpoints, err := t.getServiceEndpoints(ctx, namespace, serviceName) + if err != nil && !errors.Is(err, kubectl.ErrResourceNotFound) { + return nil, fmt.Errorf("failed retrieving service endpoints, %w", err) + } + + // Find endpoints for any matching ingress controllers + // These endpoints would typically be publicly accessible endpoints + ingressEndpoints, err := t.getIngressEndpoints(ctx, namespace, ingressName) + if err != nil && !errors.Is(err, kubectl.ErrResourceNotFound) { + return nil, fmt.Errorf("failed retrieving ingress endpoints, %w", err) + } + + endpoints := append(serviceEndpoints, ingressEndpoints...) 
+ + return endpoints, nil +} + +// Finds a deployment using the specified deploymentNameFilter string +// Waits until the deployment rollout is complete nad all replicas are accessible +func (t *aksTarget) waitForDeployment( + ctx context.Context, + namespace string, + deploymentNameFilter string, +) (*kubectl.Deployment, error) { + return kubectl.WaitForResource( + ctx, t.kubectl, namespace, kubectl.ResourceTypeDeployment, + func(deployment *kubectl.Deployment) bool { + return strings.Contains(deployment.Metadata.Name, deploymentNameFilter) + }, + func(deployment *kubectl.Deployment) bool { + return deployment.Status.AvailableReplicas == deployment.Spec.Replicas + }, + ) +} + +// Finds an ingress using the specified ingressNameFilter string +// Waits until the ingress LoadBalancer has assigned a valid IP address +func (t *aksTarget) waitForIngress( + ctx context.Context, + namespace string, + ingressNameFilter string, +) (*kubectl.Ingress, error) { + return kubectl.WaitForResource( + ctx, t.kubectl, namespace, kubectl.ResourceTypeIngress, + func(ingress *kubectl.Ingress) bool { + return strings.Contains(ingress.Metadata.Name, ingressNameFilter) + }, + func(ingress *kubectl.Ingress) bool { + var ipAddress string + for _, config := range ingress.Status.LoadBalancer.Ingress { + if config.Ip != "" { + ipAddress = config.Ip + break + } + } + + return ipAddress != "" + }, + ) +} + +// Finds a service using the specified serviceNameFilter string +// Waits until the service is available +func (t *aksTarget) waitForService( + ctx context.Context, + namespace string, + serviceNameFilter string, +) (*kubectl.Service, error) { + return kubectl.WaitForResource( + ctx, t.kubectl, namespace, kubectl.ResourceTypeService, + func(service *kubectl.Service) bool { + return strings.Contains(service.Metadata.Name, serviceNameFilter) + }, + func(service *kubectl.Service) bool { + // If the service is not a load balancer it should be immediately available + if service.Spec.Type != 
kubectl.ServiceTypeLoadBalancer { + return true + } + + // Load balancer can take some time to be provision by AKS + var ipAddress string + for _, config := range service.Status.LoadBalancer.Ingress { + if config.Ip != "" { + ipAddress = config.Ip + break + } + } + + return ipAddress != "" + }, + ) +} + +// Retrieve any service endpoints for the specified namespace and serviceNameFilter +// Supports service types for LoadBalancer and ClusterIP +func (t *aksTarget) getServiceEndpoints(ctx context.Context, namespace string, serviceNameFilter string) ([]string, error) { + service, err := t.waitForService(ctx, namespace, serviceNameFilter) + if err != nil { + return nil, err + } + + var endpoints []string + if service.Spec.Type == kubectl.ServiceTypeLoadBalancer { + for _, resource := range service.Status.LoadBalancer.Ingress { + endpoints = append(endpoints, fmt.Sprintf("http://%s (Service, Type: LoadBalancer)", resource.Ip)) + } + } else if service.Spec.Type == kubectl.ServiceTypeClusterIp { + for index, ip := range service.Spec.ClusterIps { + endpoints = append(endpoints, fmt.Sprintf("http://%s:%d (Service, Type: ClusterIP)", ip, service.Spec.Ports[index].Port)) + } + } + + return endpoints, nil +} + +// Retrieve any ingress endpoints for the specified namespace and serviceNameFilter +// Supports service types for LoadBalancer, supports Hosts and/or IP address +func (t *aksTarget) getIngressEndpoints(ctx context.Context, namespace string, resourceFilter string) ([]string, error) { + ingress, err := t.waitForIngress(ctx, namespace, resourceFilter) + if err != nil { + return nil, err + } + + var endpoints []string + var protocol string + if ingress.Spec.Tls == nil { + protocol = "http" + } else { + protocol = "https" + } + + for index, resource := range ingress.Status.LoadBalancer.Ingress { + var baseUrl string + if ingress.Spec.Rules[index].Host == nil { + baseUrl = fmt.Sprintf("%s://%s", protocol, resource.Ip) + } else { + baseUrl = fmt.Sprintf("%s://%s", 
*ingress.Spec.Rules[index].Host, resource.Ip) + } + + endpointUrl, err := url.JoinPath(baseUrl, t.config.K8s.Ingress.RelativePath) + if err != nil { + return nil, fmt.Errorf("failed constructing service endpoints, %w", err) + } + + endpoints = append(endpoints, fmt.Sprintf("%s (Ingress, Type: LoadBalancer)", endpointUrl)) + } + + return endpoints, nil +} + +func (t *aksTarget) generateImageTag() (string, error) { + configuredTag, err := t.config.Docker.Tag.Envsubst(t.env.Getenv) + if err != nil { + return "", err + } + + if configuredTag != "" { + return configuredTag, nil + } + + return fmt.Sprintf("%s/%s-%s:azdev-deploy-%d", + strings.ToLower(t.config.Project.Name), + strings.ToLower(t.config.Name), + strings.ToLower(t.env.GetEnvName()), + t.clock.Now().Unix(), + ), nil +} + +func (t *aksTarget) getK8sNamespace() string { + namespace := t.config.K8s.Namespace + if namespace == "" { + namespace = t.config.Project.Name + } + + return namespace +} + +func NewAksTarget( + config *ServiceConfig, + env *environment.Environment, + scope *environment.TargetResource, + azCli azcli.AzCli, + containerService azcli.ContainerServiceClient, + kubectlCli kubectl.KubectlCli, + docker docker.Docker, +) ServiceTarget { + return &aksTarget{ + config: config, + env: env, + scope: scope, + az: azCli, + containerService: containerService, + docker: docker, + kubectl: kubectlCli, + clock: clock.New(), + } +} diff --git a/cli/azd/pkg/project/service_target_aks_test.go b/cli/azd/pkg/project/service_target_aks_test.go new file mode 100644 index 00000000000..0d9e1820f0f --- /dev/null +++ b/cli/azd/pkg/project/service_target_aks_test.go @@ -0,0 +1,516 @@ +package project + +import ( + "context" + "encoding/json" + "fmt" + "log" + "net/http" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2" + 
"github.com/azure/azure-dev/cli/azd/pkg/convert" + "github.com/azure/azure-dev/cli/azd/pkg/environment" + "github.com/azure/azure-dev/cli/azd/pkg/environment/azdcontext" + "github.com/azure/azure-dev/cli/azd/pkg/exec" + "github.com/azure/azure-dev/cli/azd/pkg/infra" + "github.com/azure/azure-dev/cli/azd/pkg/osutil" + "github.com/azure/azure-dev/cli/azd/pkg/tools/azcli" + "github.com/azure/azure-dev/cli/azd/pkg/tools/docker" + "github.com/azure/azure-dev/cli/azd/pkg/tools/kubectl" + "github.com/azure/azure-dev/cli/azd/test/mocks" + "github.com/azure/azure-dev/cli/azd/test/ostest" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v3" +) + +func Test_NewAksTarget(t *testing.T) { + mockContext := mocks.NewMockContext(context.Background()) + serviceConfig := createServiceConfig("") + env := createEnv() + + serviceTarget, err := createServiceTarget(mockContext, serviceConfig, env) + + require.NoError(t, err) + require.NotNil(t, serviceTarget) + require.NotNil(t, serviceConfig) +} + +func Test_Deploy_HappyPath(t *testing.T) { + tempDir := t.TempDir() + ostest.Chdir(t, tempDir) + + mockContext := mocks.NewMockContext(context.Background()) + err := setupMocks(mockContext) + require.NoError(t, err) + + serviceConfig := createServiceConfig(tempDir) + env := createEnv() + + serviceTarget, err := createServiceTarget(mockContext, serviceConfig, env) + require.NoError(t, err) + + err = setupK8sManifests(t, serviceConfig) + require.NoError(t, err) + + azdContext := azdcontext.NewAzdContextWithDirectory(tempDir) + progressChan := make(chan (string)) + + go func() { + for value := range progressChan { + log.Println(value) + } + }() + + result, err := serviceTarget.Deploy(*mockContext.Context, azdContext, "", progressChan) + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, AksTarget, result.Kind) + require.NotNil(t, env.Values["SERVICE_SVC_IMAGE_NAME"]) + require.IsType(t, new(kubectl.Deployment), result.Details) + require.Greater(t, 
len(result.Endpoints), 0) +} + +func Test_Deploy_No_Cluster_Name(t *testing.T) { + tempDir := t.TempDir() + ostest.Chdir(t, tempDir) + + mockContext := mocks.NewMockContext(context.Background()) + err := setupMocks(mockContext) + require.NoError(t, err) + + serviceConfig := createServiceConfig(tempDir) + env := createEnv() + + // Simulate AKS cluster name not found in env file + delete(env.Values, environment.AksClusterEnvVarName) + + serviceTarget, err := createServiceTarget(mockContext, serviceConfig, env) + require.NoError(t, err) + + azdContext := azdcontext.NewAzdContextWithDirectory(tempDir) + progressChan := make(chan (string)) + + go func() { + for value := range progressChan { + log.Println(value) + } + }() + + result, err := serviceTarget.Deploy(*mockContext.Context, azdContext, "", progressChan) + require.Error(t, err) + require.ErrorContains(t, err, "could not determine AKS cluster") + require.Equal(t, ServiceDeploymentResult{}, result) +} + +func Test_Deploy_No_Container_Registry(t *testing.T) { + tempDir := t.TempDir() + ostest.Chdir(t, tempDir) + + mockContext := mocks.NewMockContext(context.Background()) + err := setupMocks(mockContext) + require.NoError(t, err) + + serviceConfig := createServiceConfig(tempDir) + env := createEnv() + + // Simulate container registry endpoint not found in env file + delete(env.Values, environment.ContainerRegistryEndpointEnvVarName) + + serviceTarget, err := createServiceTarget(mockContext, serviceConfig, env) + require.NoError(t, err) + + azdContext := azdcontext.NewAzdContextWithDirectory(tempDir) + progressChan := make(chan (string)) + + result, err := serviceTarget.Deploy(*mockContext.Context, azdContext, "", progressChan) + require.Error(t, err) + require.ErrorContains(t, err, "could not determine container registry endpoint") + require.Equal(t, ServiceDeploymentResult{}, result) +} + +func Test_Deploy_No_Admin_Credentials(t *testing.T) { + tempDir := t.TempDir() + ostest.Chdir(t, tempDir) + + mockContext := 
mocks.NewMockContext(context.Background()) + err := setupMocks(mockContext) + require.NoError(t, err) + + // Simulate list credentials fail. + // For more secure clusters getting admin credentials can fail + err = setupListClusterAdminCredentialsMock(mockContext, http.StatusUnauthorized) + require.NoError(t, err) + + serviceConfig := createServiceConfig(tempDir) + env := createEnv() + + serviceTarget, err := createServiceTarget(mockContext, serviceConfig, env) + require.NoError(t, err) + + azdContext := azdcontext.NewAzdContextWithDirectory(tempDir) + progressChan := make(chan (string)) + + go func() { + for value := range progressChan { + log.Println(value) + } + }() + + result, err := serviceTarget.Deploy(*mockContext.Context, azdContext, "", progressChan) + require.Error(t, err) + require.ErrorContains(t, err, "failed retrieving cluster admin credentials") + require.Equal(t, ServiceDeploymentResult{}, result) +} + +func setupK8sManifests(t *testing.T, serviceConfig *ServiceConfig) error { + manifestsDir := filepath.Join(serviceConfig.RelativePath, "manifests") + err := os.MkdirAll(manifestsDir, osutil.PermissionDirectory) + require.NoError(t, err) + + filenames := []string{"deployment.yaml", "service.yaml", "ingress.yaml"} + + for _, filename := range filenames { + err = os.WriteFile(filepath.Join(manifestsDir, filename), []byte(""), osutil.PermissionFile) + require.NoError(t, err) + } + + return nil +} + +func setupListClusterAdminCredentialsMock(mockContext *mocks.MockContext, statusCode int) error { + kubeConfig := createTestCluster("cluster1", "user1") + kubeConfigBytes, err := yaml.Marshal(kubeConfig) + if err != nil { + return err + } + + // Get Admin cluster credentials + mockContext.HttpClient.When(func(request *http.Request) bool { + return request.Method == http.MethodPost && strings.Contains(request.URL.Path, "listClusterAdminCredential") + }).RespondFn(func(request *http.Request) (*http.Response, error) { + creds := 
armcontainerservice.CredentialResults{ + Kubeconfigs: []*armcontainerservice.CredentialResult{ + { + Name: convert.RefOf("context"), + Value: kubeConfigBytes, + }, + }, + } + + if statusCode == http.StatusOK { + return mocks.CreateHttpResponseWithBody(request, statusCode, creds) + } else { + return mocks.CreateEmptyHttpResponse(request, statusCode) + } + }) + + return nil +} + +func setupMocks(mockContext *mocks.MockContext) error { + err := setupListClusterAdminCredentialsMock(mockContext, http.StatusOK) + if err != nil { + return err + } + + // Config view + mockContext.CommandRunner.When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "kubectl config view") + }).RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + return exec.NewRunResult(0, "", ""), nil + }) + + // Config use context + mockContext.CommandRunner.When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "kubectl config use-context") + }).RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + return exec.NewRunResult(0, "", ""), nil + }) + + // Create Namespace + mockContext.CommandRunner.When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "kubectl create namespace") + }).RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + return exec.NewRunResult(0, "", ""), nil + }) + + // Apply Pipe + mockContext.CommandRunner.When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "kubectl apply -f -") + }).RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + return exec.NewRunResult(0, "", ""), nil + }) + + // Create Secret + mockContext.CommandRunner.When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "kubectl create secret generic") + }).RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + return exec.NewRunResult(0, "", ""), nil + }) + + // List container registries + 
mockContext.HttpClient.When(func(request *http.Request) bool { + return request.Method == http.MethodGet && + strings.Contains(request.URL.Path, "Microsoft.ContainerRegistry/registries") + }).RespondFn(func(request *http.Request) (*http.Response, error) { + result := armcontainerregistry.RegistryListResult{ + NextLink: nil, + Value: []*armcontainerregistry.Registry{ + { + ID: convert.RefOf( + //nolint:lll + "/subscriptions/SUBSCRIPTION_ID/resourceGroups/RESOURCE_GROUP/providers/Microsoft.ContainerRegistry/registries/REGISTRY", + ), + Location: convert.RefOf("eastus2"), + Name: convert.RefOf("REGISTRY"), + Properties: &armcontainerregistry.RegistryProperties{ + LoginServer: convert.RefOf("REGISTRY.azurecr.io"), + }, + }, + }, + } + + return mocks.CreateHttpResponseWithBody(request, http.StatusOK, result) + }) + + // List container credentials + mockContext.HttpClient.When(func(request *http.Request) bool { + return request.Method == http.MethodPost && strings.Contains(request.URL.Path, "listCredentials") + }).RespondFn(func(request *http.Request) (*http.Response, error) { + result := armcontainerregistry.RegistryListCredentialsResult{ + Username: convert.RefOf("admin"), + Passwords: []*armcontainerregistry.RegistryPassword{ + { + Name: convert.RefOf(armcontainerregistry.PasswordName("admin")), + Value: convert.RefOf("password"), + }, + }, + } + + return mocks.CreateHttpResponseWithBody(request, http.StatusOK, result) + }) + + // Docker login + mockContext.CommandRunner.When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "docker login") + }).RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + return exec.NewRunResult(0, "", ""), nil + }) + + // Docker Tag + mockContext.CommandRunner.When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "docker tag") + }).RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + return exec.NewRunResult(0, "", ""), nil + }) + + // Push Container Image + 
mockContext.CommandRunner.When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "docker push") + }).RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + return exec.NewRunResult(0, "", ""), nil + }) + + // Get deployments + mockContext.CommandRunner.When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "kubectl get deployment") + }).RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + deployment := &kubectl.Deployment{ + Resource: kubectl.Resource{ + ApiVersion: "apps/v1", + Kind: "Deployment", + Metadata: kubectl.ResourceMetadata{ + Name: "svc-deployment", + Namespace: "svc-namespace", + }, + }, + Spec: kubectl.DeploymentSpec{ + Replicas: 2, + }, + Status: kubectl.DeploymentStatus{ + AvailableReplicas: 2, + ReadyReplicas: 2, + Replicas: 2, + UpdatedReplicas: 2, + }, + } + deploymentList := createK8sResourceList(deployment) + jsonBytes, _ := json.Marshal(deploymentList) + + return exec.NewRunResult(0, string(jsonBytes), ""), nil + }) + + // Get services + mockContext.CommandRunner.When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "kubectl get svc") + }).RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + service := &kubectl.Service{ + Resource: kubectl.Resource{ + ApiVersion: "v1", + Kind: "Service", + Metadata: kubectl.ResourceMetadata{ + Name: "svc-service", + Namespace: "svc-namespace", + }, + }, + Spec: kubectl.ServiceSpec{ + Type: kubectl.ServiceTypeClusterIp, + ClusterIps: []string{ + "10.10.10.10", + }, + Ports: []kubectl.Port{ + { + Port: 80, + TargetPort: 3000, + Protocol: "http", + }, + }, + }, + } + serviceList := createK8sResourceList(service) + jsonBytes, _ := json.Marshal(serviceList) + + return exec.NewRunResult(0, string(jsonBytes), ""), nil + }) + + // Get Ingress + mockContext.CommandRunner.When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "kubectl get ing") + }).RespondFn(func(args 
exec.RunArgs) (exec.RunResult, error) { + ingress := &kubectl.Ingress{ + Resource: kubectl.Resource{ + ApiVersion: "networking.k8s.io/v1", + Kind: "Ingress", + Metadata: kubectl.ResourceMetadata{ + Name: "svc-ingress", + Namespace: "svc-namespace", + }, + }, + Spec: kubectl.IngressSpec{ + IngressClassName: "webapprouting.kubernetes.azure.com", + Rules: []kubectl.IngressRule{ + { + Http: kubectl.IngressRuleHttp{ + Paths: []kubectl.IngressPath{ + { + Path: "/", + PathType: "Prefix", + }, + }, + }, + }, + }, + }, + Status: kubectl.IngressStatus{ + LoadBalancer: kubectl.LoadBalancer{ + Ingress: []kubectl.LoadBalancerIngress{ + { + Ip: "1.1.1.1", + }, + }, + }, + }, + } + ingressList := createK8sResourceList(ingress) + jsonBytes, _ := json.Marshal(ingressList) + + return exec.NewRunResult(0, string(jsonBytes), ""), nil + }) + + return nil +} + +func createK8sResourceList[T any](resource T) *kubectl.List[T] { + return &kubectl.List[T]{ + Resource: kubectl.Resource{ + ApiVersion: "list", + Kind: "List", + Metadata: kubectl.ResourceMetadata{ + Name: "list", + Namespace: "namespace", + }, + }, + Items: []T{ + resource, + }, + } +} + +func createServiceConfig(projectDirectory string) *ServiceConfig { + return &ServiceConfig{ + Project: &ProjectConfig{ + Name: "project", + Path: projectDirectory, + }, + Name: "svc", + RelativePath: "./src", + Host: string(AksTarget), + Language: "js", + } +} + +func createEnv() *environment.Environment { + return environment.EphemeralWithValues("test", map[string]string{ + environment.TenantIdEnvVarName: "TENANT_ID", + environment.SubscriptionIdEnvVarName: "SUBSCRIPTION_ID", + environment.LocationEnvVarName: "LOCATION", + environment.ResourceGroupEnvVarName: "RESOURCE_GROUP", + environment.AksClusterEnvVarName: "AKS_CLUSTER", + environment.ContainerRegistryEndpointEnvVarName: "REGISTRY.azurecr.io", + }) +} + +func createServiceTarget( + mockContext *mocks.MockContext, + serviceConfig *ServiceConfig, + env *environment.Environment, +) 
(ServiceTarget, error) { + scope := environment.NewTargetResource("SUB_ID", "RG_ID", "CLUSTER_NAME", string(infra.AzureResourceTypeManagedCluster)) + azCli := azcli.NewAzCli(mockContext.Credentials, azcli.NewAzCliArgs{}) + containerServiceClient, err := azCli.ContainerService(*mockContext.Context, env.GetSubscriptionId()) + + if err != nil { + return nil, err + } + + kubeCtl := kubectl.NewKubectl(mockContext.CommandRunner) + docker := docker.NewDocker(mockContext.CommandRunner) + + return NewAksTarget(serviceConfig, env, scope, azCli, containerServiceClient, kubeCtl, docker), nil +} + +func createTestCluster(clusterName, username string) *kubectl.KubeConfig { + return &kubectl.KubeConfig{ + ApiVersion: "v1", + Kind: "Config", + CurrentContext: clusterName, + Preferences: kubectl.KubePreferences{}, + Clusters: []*kubectl.KubeCluster{ + { + Name: clusterName, + Cluster: kubectl.KubeClusterData{ + Server: fmt.Sprintf("https://%s.eastus2.azmk8s.io:443", clusterName), + }, + }, + }, + Users: []*kubectl.KubeUser{ + { + Name: fmt.Sprintf("%s_%s", clusterName, username), + }, + }, + Contexts: []*kubectl.KubeContext{ + { + Name: clusterName, + Context: kubectl.KubeContextData{ + Cluster: clusterName, + User: fmt.Sprintf("%s_%s", clusterName, username), + }, + }, + }, + } +} diff --git a/cli/azd/pkg/project/service_target_containerapp.go b/cli/azd/pkg/project/service_target_containerapp.go index a5c96c4a71e..8be00b8dc98 100644 --- a/cli/azd/pkg/project/service_target_containerapp.go +++ b/cli/azd/pkg/project/service_target_containerapp.go @@ -31,7 +31,7 @@ type containerAppTarget struct { env *environment.Environment resource *environment.TargetResource cli azcli.AzCli - docker *docker.Docker + docker docker.Docker console input.Console commandRunner exec.CommandRunner accountManager account.Manager @@ -70,7 +70,7 @@ func (at *containerAppTarget) Deploy( log.Printf("logging into registry %s", loginServer) progress <- "Logging into container registry" - if err := 
at.cli.LoginAcr(ctx, at.commandRunner, at.env.GetSubscriptionId(), loginServer); err != nil { + if err := at.cli.LoginAcr(ctx, at.docker, at.env.GetSubscriptionId(), loginServer); err != nil { return ServiceDeploymentResult{}, fmt.Errorf("logging into registry '%s': %w", loginServer, err) } @@ -234,7 +234,7 @@ func NewContainerAppTarget( env *environment.Environment, resource *environment.TargetResource, azCli azcli.AzCli, - docker *docker.Docker, + docker docker.Docker, console input.Console, commandRunner exec.CommandRunner, accountManager account.Manager, diff --git a/cli/azd/pkg/tools/azcli/azcli.go b/cli/azd/pkg/tools/azcli/azcli.go index add99aa95b7..0cc8ca4ebf0 100644 --- a/cli/azd/pkg/tools/azcli/azcli.go +++ b/cli/azd/pkg/tools/azcli/azcli.go @@ -16,8 +16,8 @@ import ( azdinternal "github.com/azure/azure-dev/cli/azd/internal" "github.com/azure/azure-dev/cli/azd/pkg/azsdk" "github.com/azure/azure-dev/cli/azd/pkg/azure" - "github.com/azure/azure-dev/cli/azd/pkg/exec" "github.com/azure/azure-dev/cli/azd/pkg/httputil" + "github.com/azure/azure-dev/cli/azd/pkg/tools/docker" ) var ( @@ -37,7 +37,7 @@ type AzCli interface { // UserAgent gets the currently configured user agent UserAgent() string - LoginAcr(ctx context.Context, commandRunner exec.CommandRunner, subscriptionId string, loginServer string) error + LoginAcr(ctx context.Context, dockerCli docker.Docker, subscriptionId string, loginServer string) error GetContainerRegistries(ctx context.Context, subscriptionId string) ([]*armcontainerregistry.Registry, error) GetSubscriptionDeployment( ctx context.Context, @@ -165,6 +165,9 @@ type AzCli interface { environmentName string, ) (*AzCliStaticWebAppEnvironmentProperties, error) GetAccessToken(ctx context.Context) (*AzCliAccessToken, error) + + // AKS + ContainerService(ctx context.Context, subscriptionId string) (ContainerServiceClient, error) } type AzCliDeployment struct { @@ -344,3 +347,8 @@ func clientOptionsBuilder(httpClient httputil.HttpClient, 
userAgent string) *azs WithTransport(httpClient). WithPerCallPolicy(azsdk.NewUserAgentPolicy(userAgent)) } + +func (cli *azCli) ContainerService(ctx context.Context, subscriptionId string) (ContainerServiceClient, error) { + options := cli.createDefaultClientOptionsBuilder(ctx).BuildArmClientOptions() + return NewContainerServiceClient(subscriptionId, cli.credential, options) +} diff --git a/cli/azd/pkg/tools/azcli/container_registry.go b/cli/azd/pkg/tools/azcli/container_registry.go index db6c02030fb..80d874219c3 100644 --- a/cli/azd/pkg/tools/azcli/container_registry.go +++ b/cli/azd/pkg/tools/azcli/container_registry.go @@ -7,7 +7,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry" "github.com/azure/azure-dev/cli/azd/pkg/azure" - "github.com/azure/azure-dev/cli/azd/pkg/exec" "github.com/azure/azure-dev/cli/azd/pkg/tools/docker" "golang.org/x/exp/slices" ) @@ -37,7 +36,7 @@ func (cli *azCli) GetContainerRegistries( } func (cli *azCli) LoginAcr(ctx context.Context, - commandRunner exec.CommandRunner, subscriptionId string, loginServer string, + dockerCli docker.Docker, subscriptionId string, loginServer string, ) error { client, err := cli.createRegistriesClient(ctx, subscriptionId) if err != nil { @@ -62,7 +61,6 @@ func (cli *azCli) LoginAcr(ctx context.Context, username := *credResponse.Username // Login to docker with ACR credentials to allow push operations - dockerCli := docker.NewDocker(commandRunner) err = dockerCli.Login(ctx, loginServer, username, *credResponse.Passwords[0].Value) if err != nil { return fmt.Errorf("failed logging into docker for username '%s' and server %s: %w", loginServer, username, err) diff --git a/cli/azd/pkg/tools/azcli/container_service.go b/cli/azd/pkg/tools/azcli/container_service.go new file mode 100644 index 00000000000..1721064499a --- /dev/null +++ b/cli/azd/pkg/tools/azcli/container_service.go @@ -0,0 +1,51 @@ +package azcli + +import ( + "context" + + 
"github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2" +) + +type ContainerServiceClient interface { + GetAdminCredentials( + ctx context.Context, + resourceGroupName string, + resourceName string, + ) (*armcontainerservice.CredentialResults, error) +} + +type containerServiceClient struct { + client *armcontainerservice.ManagedClustersClient + subscriptionId string +} + +func NewContainerServiceClient( + subscriptionId string, + credential azcore.TokenCredential, + options *arm.ClientOptions, +) (ContainerServiceClient, error) { + azureClient, err := armcontainerservice.NewManagedClustersClient(subscriptionId, credential, options) + if err != nil { + return nil, err + } + + return &containerServiceClient{ + subscriptionId: subscriptionId, + client: azureClient, + }, nil +} + +func (cs *containerServiceClient) GetAdminCredentials( + ctx context.Context, + resourceGroupName string, + resourceName string, +) (*armcontainerservice.CredentialResults, error) { + creds, err := cs.client.ListClusterAdminCredentials(ctx, resourceGroupName, resourceName, nil) + if err != nil { + return nil, err + } + + return &creds.CredentialResults, nil +} diff --git a/cli/azd/pkg/tools/docker/docker.go b/cli/azd/pkg/tools/docker/docker.go index 76b62f00e8d..eb0edc8687f 100644 --- a/cli/azd/pkg/tools/docker/docker.go +++ b/cli/azd/pkg/tools/docker/docker.go @@ -13,17 +13,25 @@ import ( "github.com/blang/semver/v4" ) -func NewDocker(commandRunner exec.CommandRunner) *Docker { - return &Docker{ +type Docker interface { + tools.ExternalTool + Login(ctx context.Context, loginServer string, username string, password string) error + Build(ctx context.Context, cwd string, dockerFilePath string, platform string, buildContext string) (string, error) + Tag(ctx context.Context, cwd string, imageName string, tag string) error + Push(ctx context.Context, cwd 
string, tag string) error +} + +func NewDocker(commandRunner exec.CommandRunner) Docker { + return &docker{ commandRunner: commandRunner, } } -type Docker struct { +type docker struct { commandRunner exec.CommandRunner } -func (d *Docker) Login(ctx context.Context, loginServer string, username string, password string) error { +func (d *docker) Login(ctx context.Context, loginServer string, username string, password string) error { _, err := d.executeCommand(ctx, ".", "login", "--username", username, "--password", password, @@ -40,7 +48,7 @@ func (d *Docker) Login(ctx context.Context, loginServer string, username string, // it defaults to amd64. If the build // is successful, the function // returns the image id of the built image. -func (d *Docker) Build( +func (d *docker) Build( ctx context.Context, cwd string, dockerFilePath string, @@ -59,7 +67,7 @@ func (d *Docker) Build( return strings.TrimSpace(res.Stdout), nil } -func (d *Docker) Tag(ctx context.Context, cwd string, imageName string, tag string) error { +func (d *docker) Tag(ctx context.Context, cwd string, imageName string, tag string) error { res, err := d.executeCommand(ctx, cwd, "tag", imageName, tag) if err != nil { return fmt.Errorf("tagging image: %s: %w", res.String(), err) @@ -68,7 +76,7 @@ func (d *Docker) Tag(ctx context.Context, cwd string, imageName string, tag stri return nil } -func (d *Docker) Push(ctx context.Context, cwd string, tag string) error { +func (d *docker) Push(ctx context.Context, cwd string, tag string) error { res, err := d.executeCommand(ctx, cwd, "push", tag) if err != nil { return fmt.Errorf("pushing image: %s: %w", res.String(), err) @@ -77,7 +85,7 @@ func (d *Docker) Push(ctx context.Context, cwd string, tag string) error { return nil } -func (d *Docker) versionInfo() tools.VersionInfo { +func (d *docker) versionInfo() tools.VersionInfo { return tools.VersionInfo{ MinimumVersion: semver.Version{ Major: 17, @@ -162,8 +170,7 @@ func isSupportedDockerVersion(cliOutput string) 
(bool, error) { // If we reach this point, we don't understand how to validate the version based on its scheme. return false, fmt.Errorf("could not determine version from docker version string: %s", version) } - -func (d *Docker) CheckInstalled(ctx context.Context) (bool, error) { +func (d *docker) CheckInstalled(ctx context.Context) (bool, error) { found, err := tools.ToolInPath("docker") if !found { return false, err @@ -182,15 +189,15 @@ func (d *Docker) CheckInstalled(ctx context.Context) (bool, error) { return true, nil } -func (d *Docker) InstallUrl() string { +func (d *docker) InstallUrl() string { return "https://aka.ms/azure-dev/docker-install" } -func (d *Docker) Name() string { +func (d *docker) Name() string { return "Docker" } -func (d *Docker) executeCommand(ctx context.Context, cwd string, args ...string) (exec.RunResult, error) { +func (d *docker) executeCommand(ctx context.Context, cwd string, args ...string) (exec.RunResult, error) { runArgs := exec.NewRunArgs("docker", args...). WithCwd(cwd). 
WithEnrichError(true) diff --git a/cli/azd/pkg/tools/kubectl/kube_config.go b/cli/azd/pkg/tools/kubectl/kube_config.go new file mode 100644 index 00000000000..bed4f2bedd3 --- /dev/null +++ b/cli/azd/pkg/tools/kubectl/kube_config.go @@ -0,0 +1,118 @@ +package kubectl + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/azure/azure-dev/cli/azd/pkg/osutil" + "gopkg.in/yaml.v3" +) + +type KubeConfigManager struct { + cli KubectlCli + configPath string +} + +func NewKubeConfigManager(cli KubectlCli) (*KubeConfigManager, error) { + kubeConfigDir, err := getKubeConfigDir() + if err != nil { + return nil, err + } + + return &KubeConfigManager{ + cli: cli, + configPath: kubeConfigDir, + }, nil +} + +func ParseKubeConfig(ctx context.Context, raw []byte) (*KubeConfig, error) { + var existing KubeConfig + if err := yaml.Unmarshal(raw, &existing); err != nil { + return nil, fmt.Errorf("failed unmarshalling Kube Config YAML: %w", err) + } + + return &existing, nil +} + +func (kcm *KubeConfigManager) SaveKubeConfig(ctx context.Context, configName string, config *KubeConfig) error { + kubeConfigRaw, err := yaml.Marshal(config) + if err != nil { + return fmt.Errorf("failed marshalling KubeConfig to yaml: %w", err) + } + + // Create .kube config folder if it doesn't already exist + _, err = os.Stat(kcm.configPath) + if err != nil { + if err := os.MkdirAll(kcm.configPath, osutil.PermissionDirectory); err != nil { + return fmt.Errorf("failed creating .kube config directory, %w", err) + } + } + + outFilePath := filepath.Join(kcm.configPath, configName) + err = os.WriteFile(outFilePath, kubeConfigRaw, osutil.PermissionFile) + if err != nil { + return fmt.Errorf("failed writing kube config file: %w", err) + } + + return nil +} + +func (kcm *KubeConfigManager) DeleteKubeConfig(ctx context.Context, configName string) error { + kubeConfigPath := filepath.Join(kcm.configPath, configName) + err := os.Remove(kubeConfigPath) + if err != nil { + return 
fmt.Errorf("failed deleting kube config file: %w", err) + } + + return nil +} + +func (kcm *KubeConfigManager) MergeConfigs(ctx context.Context, newConfigName string, path ...string) error { + fullConfigPaths := []string{} + for _, kubeConfigName := range path { + fullConfigPaths = append(fullConfigPaths, filepath.Join(kcm.configPath, kubeConfigName)) + } + + envValues := map[string]string{ + "KUBECONFIG": strings.Join(fullConfigPaths, string(os.PathListSeparator)), + } + kcm.cli.SetEnv(envValues) + res, err := kcm.cli.ConfigView(ctx, true, true, nil) + if err != nil { + return fmt.Errorf("kubectl config view failed: %w", err) + } + + kubeConfigRaw := []byte(res.Stdout) + outFilePath := filepath.Join(kcm.configPath, newConfigName) + err = os.WriteFile(outFilePath, kubeConfigRaw, osutil.PermissionFile) + if err != nil { + return fmt.Errorf("failed writing new kube config: %w", err) + } + + return nil +} + +func (kcm *KubeConfigManager) AddOrUpdateContext(ctx context.Context, contextName string, newKubeConfig *KubeConfig) error { + err := kcm.SaveKubeConfig(ctx, contextName, newKubeConfig) + if err != nil { + return fmt.Errorf("failed write new kube context file: %w", err) + } + + err = kcm.MergeConfigs(ctx, "config", contextName) + if err != nil { + return fmt.Errorf("failed merging KUBE configs: %w", err) + } + + return nil +} + +func getKubeConfigDir() (string, error) { + userHomeDir, err := os.UserHomeDir() + if err != nil { + return "", fmt.Errorf("cannot get user home directory: %w", err) + } + return filepath.Join(userHomeDir, ".kube"), nil +} diff --git a/cli/azd/pkg/tools/kubectl/kube_config_test.go b/cli/azd/pkg/tools/kubectl/kube_config_test.go new file mode 100644 index 00000000000..9642a7e8e1e --- /dev/null +++ b/cli/azd/pkg/tools/kubectl/kube_config_test.go @@ -0,0 +1,74 @@ +package kubectl + +import ( + "context" + "fmt" + "os" + "testing" + + "github.com/azure/azure-dev/cli/azd/pkg/exec" + "github.com/azure/azure-dev/cli/azd/test/mocks" + 
"github.com/stretchr/testify/require" +) + +func Test_MergeKubeConfig(t *testing.T) { + mockContext := mocks.NewMockContext(context.Background()) + commandRunner := exec.NewCommandRunner(os.Stdin, os.Stdout, os.Stderr) + cli := NewKubectl(commandRunner) + kubeConfigManager, err := NewKubeConfigManager(cli) + require.NoError(t, err) + + config1 := createTestCluster("cluster1", "user1") + config2 := createTestCluster("cluster2", "user2") + config3 := createTestCluster("cluster3", "user3") + + defer func() { + err := kubeConfigManager.DeleteKubeConfig(*mockContext.Context, "config1") + require.NoError(t, err) + err = kubeConfigManager.DeleteKubeConfig(*mockContext.Context, "config2") + require.NoError(t, err) + err = kubeConfigManager.DeleteKubeConfig(*mockContext.Context, "config3") + require.NoError(t, err) + }() + + err = kubeConfigManager.SaveKubeConfig(*mockContext.Context, "config1", config1) + require.NoError(t, err) + err = kubeConfigManager.SaveKubeConfig(*mockContext.Context, "config2", config2) + require.NoError(t, err) + err = kubeConfigManager.SaveKubeConfig(*mockContext.Context, "config3", config3) + require.NoError(t, err) + + err = kubeConfigManager.MergeConfigs(*mockContext.Context, "config", "config1", "config2", "config3") + require.NoError(t, err) +} + +func createTestCluster(clusterName, username string) *KubeConfig { + return &KubeConfig{ + ApiVersion: "v1", + Kind: "Config", + CurrentContext: clusterName, + Preferences: KubePreferences{}, + Clusters: []*KubeCluster{ + { + Name: clusterName, + Cluster: KubeClusterData{ + Server: fmt.Sprintf("https://%s.eastus2.azmk8s.io:443", clusterName), + }, + }, + }, + Users: []*KubeUser{ + { + Name: fmt.Sprintf("%s_%s", clusterName, username), + }, + }, + Contexts: []*KubeContext{ + { + Name: clusterName, + Context: KubeContextData{ + Cluster: clusterName, + User: fmt.Sprintf("%s_%s", clusterName, username), + }, + }, + }, + } +} diff --git a/cli/azd/pkg/tools/kubectl/kubectl.go 
b/cli/azd/pkg/tools/kubectl/kubectl.go new file mode 100644 index 00000000000..8b26cd123fb --- /dev/null +++ b/cli/azd/pkg/tools/kubectl/kubectl.go @@ -0,0 +1,265 @@ +package kubectl + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/azure/azure-dev/cli/azd/pkg/exec" + "github.com/azure/azure-dev/cli/azd/pkg/tools" + "github.com/drone/envsubst" +) + +// Executes commands against the Kubernetes CLI +type KubectlCli interface { + tools.ExternalTool + // Sets the current working directory + Cwd(cwd string) + // Sets the env vars available to the CLI + SetEnv(env map[string]string) + // Applies one or more files from the specified path + ApplyFiles(ctx context.Context, path string, flags *KubeCliFlags) error + // Applies manifests from the specified input + ApplyPipe(ctx context.Context, input string, flags *KubeCliFlags) (*exec.RunResult, error) + // Views the current k8s configuration including available clusters, contexts & users + ConfigView(ctx context.Context, merge bool, flatten bool, flags *KubeCliFlags) (*exec.RunResult, error) + // Sets the k8s context to use for future CLI commands + ConfigUseContext(ctx context.Context, name string, flags *KubeCliFlags) (*exec.RunResult, error) + // Creates a new k8s namespace with the specified name + CreateNamespace(ctx context.Context, name string, flags *KubeCliFlags) (*exec.RunResult, error) + // Creates a new generic secret from the specified secret pairs + CreateSecretGenericFromLiterals( + ctx context.Context, + name string, + secrets []string, + flags *KubeCliFlags, + ) (*exec.RunResult, error) + // Executes a k8s CLI command from the specified arguments and flags + Exec(ctx context.Context, flags *KubeCliFlags, args ...string) (exec.RunResult, error) +} + +// K8s CLI Fags +type KubeCliFlags struct { + // The namespace to filter the command or create resources + Namespace string + // The dry-run type, defaults to empty + DryRun string + // The expected output, typically JSON or 
YAML + Output string +} + +type kubectlCli struct { + tools.ExternalTool + commandRunner exec.CommandRunner + env map[string]string + cwd string +} + +// Creates a new K8s CLI instance +func NewKubectl(commandRunner exec.CommandRunner) KubectlCli { + return &kubectlCli{ + commandRunner: commandRunner, + } +} + +// Checks whether or not the K8s CLI is installed and available within the PATH +func (cli *kubectlCli) CheckInstalled(ctx context.Context) (bool, error) { + return tools.ToolInPath("kubectl") +} + +// Returns the installation URL to install the K8s CLI +func (cli *kubectlCli) InstallUrl() string { + return "https://aka.ms/azure-dev/kubectl-install" +} + +// Gets the name of the Tool +func (cli *kubectlCli) Name() string { + return "kubectl" +} + +// Sets the env vars available to the CLI +func (cli *kubectlCli) SetEnv(envValues map[string]string) { + cli.env = envValues +} + +// Sets the current working directory +func (cli *kubectlCli) Cwd(cwd string) { + cli.cwd = cwd +} + +// Sets the k8s context to use for future CLI commands +func (cli *kubectlCli) ConfigUseContext(ctx context.Context, name string, flags *KubeCliFlags) (*exec.RunResult, error) { + res, err := cli.Exec(ctx, flags, "config", "use-context", name) + if err != nil { + return nil, fmt.Errorf("failed setting kubectl context: %w", err) + } + + return &res, nil +} + +// Views the current k8s configuration including available clusters, contexts & users +func (cli *kubectlCli) ConfigView( + ctx context.Context, + merge bool, + flatten bool, + flags *KubeCliFlags, +) (*exec.RunResult, error) { + kubeConfigDir, err := getKubeConfigDir() + if err != nil { + return nil, err + } + + args := []string{"config", "view"} + if merge { + args = append(args, "--merge") + } + if flatten { + args = append(args, "--flatten") + } + + runArgs := exec.NewRunArgs("kubectl", args...). + WithCwd(kubeConfigDir). 
+ WithEnv(environ(cli.env)) + + res, err := cli.executeCommandWithArgs(ctx, runArgs, flags) + if err != nil { + return nil, fmt.Errorf("kubectl config view: %w", err) + } + + return &res, nil +} + +func (cli *kubectlCli) ApplyPipe(ctx context.Context, input string, flags *KubeCliFlags) (*exec.RunResult, error) { + runArgs := exec. + NewRunArgs("kubectl", "apply", "-f", "-"). + WithEnv(environ(cli.env)). + WithStdIn(strings.NewReader(input)) + + res, err := cli.executeCommandWithArgs(ctx, runArgs, flags) + if err != nil { + return nil, fmt.Errorf("kubectl apply -f: %w", err) + } + + return &res, nil +} + +// Applies manifests from the specified input +func (cli *kubectlCli) ApplyFiles(ctx context.Context, path string, flags *KubeCliFlags) error { + entries, err := os.ReadDir(path) + if err != nil { + return fmt.Errorf("failed reading files in path, '%s', %w", path, err) + } + + for _, entry := range entries { + if entry.IsDir() { + continue + } + + ext := filepath.Ext(entry.Name()) + if !(ext == ".yaml" || ext == ".yml") { + continue + } + + filePath := filepath.Join(path, entry.Name()) + fileBytes, err := os.ReadFile(filePath) + if err != nil { + return fmt.Errorf("failed reading manifest file '%s', %w", filePath, err) + } + + yaml := string(fileBytes) + replaced, err := envsubst.Eval(yaml, func(name string) string { + if val, has := cli.env[name]; has { + return val + } + return os.Getenv(name) + }) + + if err != nil { + return fmt.Errorf("failed replacing env vars, %w", err) + } + + _, err = cli.ApplyPipe(ctx, replaced, flags) + if err != nil { + return fmt.Errorf("failed applying manifest, %w", err) + } + } + + return nil +} + +// Creates a new generic secret from the specified secret pairs +func (cli *kubectlCli) CreateSecretGenericFromLiterals( + ctx context.Context, + name string, + secrets []string, + flags *KubeCliFlags, +) (*exec.RunResult, error) { + args := []string{"create", "secret", "generic", name} + for _, secret := range secrets { + args = 
append(args, fmt.Sprintf("--from-literal=%s", secret)) + } + + res, err := cli.Exec(ctx, flags, args...) + if err != nil { + return nil, fmt.Errorf("kubectl create secret generic --from-env-file: %w", err) + } + + return &res, nil +} + +// Creates a new k8s namespace with the specified name +func (cli *kubectlCli) CreateNamespace(ctx context.Context, name string, flags *KubeCliFlags) (*exec.RunResult, error) { + args := []string{"create", "namespace", name} + + res, err := cli.Exec(ctx, flags, args...) + if err != nil { + return nil, fmt.Errorf("kubectl create namespace: %w", err) + } + + return &res, nil +} + +// Executes a k8s CLI command from the specified arguments and flags +func (cli *kubectlCli) Exec(ctx context.Context, flags *KubeCliFlags, args ...string) (exec.RunResult, error) { + runArgs := exec. + NewRunArgs("kubectl"). + AppendParams(args...) + + return cli.executeCommandWithArgs(ctx, runArgs, flags) +} + +func (cli *kubectlCli) executeCommandWithArgs( + ctx context.Context, + args exec.RunArgs, + flags *KubeCliFlags, +) (exec.RunResult, error) { + args = args.WithEnrichError(true) + if cli.cwd != "" { + args = args.WithCwd(cli.cwd) + } + + if flags != nil { + if flags.DryRun != "" { + args = args.AppendParams(fmt.Sprintf("--dry-run=%s", flags.DryRun)) + } + if flags.Namespace != "" { + args = args.AppendParams("-n", flags.Namespace) + } + if flags.Output != "" { + args = args.AppendParams("-o", flags.Output) + } + } + + return cli.commandRunner.Run(ctx, args) +} + +func environ(values map[string]string) []string { + env := []string{} + for key, value := range values { + env = append(env, fmt.Sprintf("%s=%s", key, value)) + } + + return env +} diff --git a/cli/azd/pkg/tools/kubectl/kubectl_test.go b/cli/azd/pkg/tools/kubectl/kubectl_test.go new file mode 100644 index 00000000000..18dc2717ff0 --- /dev/null +++ b/cli/azd/pkg/tools/kubectl/kubectl_test.go @@ -0,0 +1,176 @@ +package kubectl + +import ( + "bytes" + "context" + "os" + "strings" + "testing" 
+ + "github.com/azure/azure-dev/cli/azd/pkg/exec" + "github.com/azure/azure-dev/cli/azd/pkg/osutil" + "github.com/azure/azure-dev/cli/azd/test/mocks" + "github.com/azure/azure-dev/cli/azd/test/ostest" + "github.com/stretchr/testify/require" +) + +func Test_ApplyFiles(t *testing.T) { + tempDir := t.TempDir() + ostest.Chdir(t, tempDir) + + ran := false + var runArgs exec.RunArgs + + mockContext := mocks.NewMockContext(context.Background()) + mockContext.CommandRunner.When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, "kubectl apply -f") + }).RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + runArgs = args + ran = true + + return exec.NewRunResult(0, "", ""), nil + }) + + cli := NewKubectl(mockContext.CommandRunner) + + err := os.WriteFile("test.yaml", []byte("yaml"), osutil.PermissionFile) + require.NoError(t, err) + + err = cli.ApplyFiles(*mockContext.Context, tempDir, &KubeCliFlags{ + Namespace: "test-namespace", + }) + require.NoError(t, err) + + buf := new(bytes.Buffer) + _, err = buf.ReadFrom(runArgs.StdIn) + require.NoError(t, err) + + require.NoError(t, err) + require.True(t, ran) + require.Equal(t, "kubectl", runArgs.Cmd) + require.Equal(t, "yaml", buf.String()) + require.Equal(t, []string{"apply", "-f", "-", "-n", "test-namespace"}, runArgs.Args) +} + +func Test_Command_Args(t *testing.T) { + tempDir := t.TempDir() + ostest.Chdir(t, tempDir) + + mockContext := mocks.NewMockContext(context.Background()) + cli := NewKubectl(mockContext.CommandRunner) + + tests := map[string]*kubeCliTestConfig{ + "apply-pipe": { + mockCommandPredicate: "kubectl apply -f -", + expectedCmd: "kubectl", + expectedArgs: []string{"apply", "-f", "-", "-n", "test-namespace"}, + testFn: func() error { + _, err := cli.ApplyPipe(*mockContext.Context, "input", &KubeCliFlags{ + Namespace: "test-namespace", + }) + + return err + }, + }, + "config-view": { + mockCommandPredicate: "kubectl config view", + expectedCmd: "kubectl", + expectedArgs: 
[]string{"config", "view", "--merge", "--flatten"}, + testFn: func() error { + _, err := cli.ConfigView(*mockContext.Context, true, true, nil) + + return err + }, + }, + "config-use-context": { + mockCommandPredicate: "kubectl config use-context", + expectedCmd: "kubectl", + expectedArgs: []string{"config", "use-context", "context-name"}, + testFn: func() error { + _, err := cli.ConfigUseContext(*mockContext.Context, "context-name", nil) + + return err + }, + }, + "create-namespace": { + mockCommandPredicate: "kubectl create namespace", + expectedCmd: "kubectl", + expectedArgs: []string{"create", "namespace", "namespace-name", "--dry-run=client", "-o", "yaml"}, + testFn: func() error { + _, err := cli.CreateNamespace(*mockContext.Context, "namespace-name", &KubeCliFlags{ + DryRun: "client", + Output: "yaml", + }) + + return err + }, + }, + "create-secret": { + mockCommandPredicate: "kubectl create secret generic", + expectedCmd: "kubectl", + expectedArgs: []string{ + "create", + "secret", + "generic", + "secret-name", + "--from-literal=foo=bar", + "-n", + "test-namespace", + }, + testFn: func() error { + _, err := cli.CreateSecretGenericFromLiterals( + *mockContext.Context, + "secret-name", + []string{"foo=bar"}, + &KubeCliFlags{ + Namespace: "test-namespace", + }, + ) + + return err + }, + }, + "exec": { + mockCommandPredicate: "kubectl get deployment", + expectedCmd: "kubectl", + expectedArgs: []string{"get", "deployment", "-n", "test-namespace", "-o", "json"}, + testFn: func() error { + _, err := cli.Exec(*mockContext.Context, &KubeCliFlags{ + Namespace: "test-namespace", + Output: "json", + }, "get", "deployment") + + return err + }, + }, + } + + for testName, config := range tests { + mockContext.CommandRunner.When(func(args exec.RunArgs, command string) bool { + return strings.Contains(command, config.mockCommandPredicate) + }).RespondFn(func(args exec.RunArgs) (exec.RunResult, error) { + config.ran = true + config.actualArgs = &args + + return 
exec.NewRunResult(0, config.mockCommandResult, ""), nil + }) + + t.Run(testName, func(t *testing.T) { + err := config.testFn() + require.NoError(t, err) + require.True(t, config.ran) + require.Equal(t, config.expectedCmd, config.actualArgs.Cmd) + require.Equal(t, config.expectedArgs, config.actualArgs.Args) + }) + } +} + +type kubeCliTestConfig struct { + mockCommandPredicate string + mockCommandResult string + expectedCmd string + expectedArgs []string + actualArgs *exec.RunArgs + ran bool + testFn func() error +} diff --git a/cli/azd/pkg/tools/kubectl/models.go b/cli/azd/pkg/tools/kubectl/models.go new file mode 100644 index 00000000000..e38610838b5 --- /dev/null +++ b/cli/azd/pkg/tools/kubectl/models.go @@ -0,0 +1,150 @@ +package kubectl + +type ResourceType string + +const ( + ResourceTypeDeployment ResourceType = "deployment" + ResourceTypeIngress ResourceType = "ing" + ResourceTypeService ResourceType = "svc" +) + +type Resource struct { + ApiVersion string `json:"apiVersion"` + Kind string `json:"kind"` + Metadata ResourceMetadata `json:"metadata"` +} + +type List[T any] struct { + Resource + Items []T `json:"items"` +} + +type ResourceWithSpec[T any, S any] struct { + Resource + Spec T `json:"spec"` + Status S `json:"status"` +} + +type ResourceMetadata struct { + Name string `json:"name"` + Namespace string `json:"namespace"` + Annotations map[string]any +} + +type Deployment ResourceWithSpec[DeploymentSpec, DeploymentStatus] + +type DeploymentSpec struct { + Replicas int `yaml:"replicas"` +} + +type DeploymentStatus struct { + AvailableReplicas int `yaml:"availableReplicas"` + ReadyReplicas int `yaml:"readyReplicas"` + Replicas int `yaml:"replicas"` + UpdatedReplicas int `yaml:"updatedReplicas"` +} + +type Ingress ResourceWithSpec[IngressSpec, IngressStatus] + +type IngressSpec struct { + IngressClassName string `json:"ingressClassName"` + Tls *IngressTls + Rules []IngressRule +} + +type IngressTls struct { + Hosts []string `yaml:"hosts"` + SecretName 
string `yaml:"secretName"` +} + +type IngressRule struct { + Host *string `yaml:"host"` + Http IngressRuleHttp `yaml:"http"` +} + +type IngressRuleHttp struct { + Paths []IngressPath `yaml:"paths"` +} + +type IngressPath struct { + Path string `yaml:"path"` + PathType string `yaml:"pathType"` +} + +type IngressStatus struct { + LoadBalancer LoadBalancer `json:"loadBalancer"` +} + +type LoadBalancer struct { + Ingress []LoadBalancerIngress `json:"ingress"` +} + +type LoadBalancerIngress struct { + Ip string `json:"ip"` +} + +type Service ResourceWithSpec[ServiceSpec, ServiceStatus] + +type ServiceType string + +const ( + ServiceTypeClusterIp ServiceType = "ClusterIP" + ServiceTypeLoadBalancer ServiceType = "LoadBalancer" + ServiceTypeNodePort ServiceType = "NodePort" + ServiceTypeExternalName ServiceType = "ExternalName" +) + +type ServiceSpec struct { + Type ServiceType `json:"type"` + ClusterIp string `json:"clusterIP"` + ClusterIps []string `json:"clusterIPs"` + Ports []Port `json:"ports"` +} + +type ServiceStatus struct { + LoadBalancer LoadBalancer `json:"loadBalancer"` +} + +type Port struct { + Port int `json:"port"` + TargetPort int `json:"targetPort"` + Protocol string `json:"protocol"` +} + +type KubeConfig struct { + ApiVersion string `yaml:"apiVersion"` + Clusters []*KubeCluster `yaml:"clusters"` + Contexts []*KubeContext `yaml:"contexts"` + Users []*KubeUser `yaml:"users"` + Kind string `yaml:"kind"` + CurrentContext string `yaml:"current-context"` + Preferences KubePreferences `yaml:"preferences"` +} + +type KubeCluster struct { + Name string `yaml:"name"` + Cluster KubeClusterData `yaml:"cluster"` +} + +type KubeClusterData struct { + CertificateAuthorityData string `yaml:"certificate-authority-data"` + Server string `yaml:"server"` +} + +type KubeContext struct { + Name string `yaml:"name"` + Context KubeContextData `yaml:"context"` +} + +type KubeContextData struct { + Cluster string `yaml:"cluster"` + User string `yaml:"user"` +} + +type KubeUser 
struct { + Name string `yaml:"name"` + KubeUserData KubeUserData `yaml:"user"` +} + +type KubeUserData map[string]any +type KubePreferences map[string]any diff --git a/cli/azd/pkg/tools/kubectl/util.go b/cli/azd/pkg/tools/kubectl/util.go new file mode 100644 index 00000000000..c2721bc8611 --- /dev/null +++ b/cli/azd/pkg/tools/kubectl/util.go @@ -0,0 +1,148 @@ +package kubectl + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/sethvargo/go-retry" + "gopkg.in/yaml.v3" +) + +var ( + ErrResourceNotFound = errors.New("cannot find resource") + ErrResourceNotReady = errors.New("resource is not ready") +) + +func GetResource[T any]( + ctx context.Context, + cli KubectlCli, + resourceType ResourceType, + resourceName string, + flags *KubeCliFlags, +) (T, error) { + if flags == nil { + flags = &KubeCliFlags{} + } + + if flags.Output == "" { + flags.Output = "json" + } + + var zero T + + res, err := cli.Exec(ctx, flags, "get", string(resourceType), resourceName) + if err != nil { + return zero, fmt.Errorf("failed getting resources, %w", err) + } + + var resource T + + switch flags.Output { + case "json": + err = json.Unmarshal([]byte(res.Stdout), &resource) + if err != nil { + return zero, fmt.Errorf("failed unmarshalling resources JSON, %w", err) + } + case "yaml": + err = yaml.Unmarshal([]byte(res.Stdout), &resource) + if err != nil { + return zero, fmt.Errorf("failed unmarshalling resources YAML, %w", err) + } + default: + return zero, fmt.Errorf("failed unmarshalling resources. 
Output format '%s' is not supported", flags.Output) + } + + return resource, nil +} + +func GetResources[T any]( + ctx context.Context, + cli KubectlCli, + resourceType ResourceType, + flags *KubeCliFlags, +) (*List[T], error) { + if flags == nil { + flags = &KubeCliFlags{} + } + + if flags.Output == "" { + flags.Output = "json" + } + + res, err := cli.Exec(ctx, flags, "get", string(resourceType)) + if err != nil { + return nil, fmt.Errorf("failed getting resources, %w", err) + } + + var list List[T] + + switch flags.Output { + case "json": + err = json.Unmarshal([]byte(res.Stdout), &list) + if err != nil { + return nil, fmt.Errorf("failed unmarshalling resources JSON, %w", err) + } + case "yaml": + err = yaml.Unmarshal([]byte(res.Stdout), &list) + if err != nil { + return nil, fmt.Errorf("failed unmarshalling resources YAML, %w", err) + } + default: + return nil, fmt.Errorf("failed unmarshalling resources. Output format '%s' is not supported", flags.Output) + } + + return &list, nil +} + +type ResourceFilterFn[T comparable] func(resource T) bool + +func WaitForResource[T comparable]( + ctx context.Context, + cli KubectlCli, + namespace string, + resourceType ResourceType, + resourceFilter ResourceFilterFn[T], + readyStatusFilter ResourceFilterFn[T], +) (T, error) { + var resource T + var zero T + err := retry.Do( + ctx, + retry.WithMaxDuration(time.Minute*10, retry.NewConstant(time.Second*10)), + func(ctx context.Context) error { + result, err := GetResources[T](ctx, cli, resourceType, &KubeCliFlags{ + Namespace: namespace, + }) + + if err != nil { + return fmt.Errorf("failed waiting for resource, %w", err) + } + + for _, r := range result.Items { + if resourceFilter(r) { + resource = r + break + } + } + + if resource == zero { + return fmt.Errorf("cannot find resource for '%s', %w", resourceType, ErrResourceNotFound) + } + + if !readyStatusFilter(resource) { + return retry.RetryableError(fmt.Errorf("resource '%s' is not ready, %w", resourceType, 
ErrResourceNotReady)) + } + + return nil + }, + ) + + if err != nil { + return zero, fmt.Errorf("failed waiting for resource, %w", err) + } + + return resource, nil +} diff --git a/cli/azd/resources/templates.json b/cli/azd/resources/templates.json index ab08012ee2d..84ddca4bfd1 100644 --- a/cli/azd/resources/templates.json +++ b/cli/azd/resources/templates.json @@ -63,5 +63,10 @@ "name": "Azure-Samples/todo-python-mongo-terraform", "description": "ToDo Application with a Python API and Azure Cosmos DB API for MongoDB on Azure App Service", "repositoryPath": "Azure-Samples/todo-python-mongo-terraform" + }, + { + "name": "Azure-Samples/todo-nodejs-mongo-aks", + "description": "ToDo Application with a Node.js API and Azure Cosmos DB API for MongoDB on Azure Kubernetes Service", + "repositoryPath": "Azure-Samples/todo-nodejs-mongo-aks" } ] \ No newline at end of file diff --git a/eng/pipelines/release-cli.yml b/eng/pipelines/release-cli.yml index a8f531adb75..855ec4e5b0a 100644 --- a/eng/pipelines/release-cli.yml +++ b/eng/pipelines/release-cli.yml @@ -96,6 +96,7 @@ stages: Condition: and(succeeded(), ne(variables['Skip.LiveTest'], 'true')) - template: /eng/pipelines/templates/steps/install-terraform.yml + - template: /eng/pipelines/templates/steps/install-kubectl.yml # Pinning DockerInstaller to 0.209.0 because 0.214.0 has failures. # Remove this pin when later versions succeed. 
diff --git a/eng/pipelines/templates/steps/install-kubectl.yml b/eng/pipelines/templates/steps/install-kubectl.yml new file mode 100644 index 00000000000..18d9021ef1e --- /dev/null +++ b/eng/pipelines/templates/steps/install-kubectl.yml @@ -0,0 +1,5 @@ +steps: + - task: KubectlInstaller@0 + displayName: Kubectl installer + inputs: + kubectlVersion: latest diff --git a/go.mod b/go.mod index d8216bbe049..ec408d6df7f 100644 --- a/go.mod +++ b/go.mod @@ -50,6 +50,10 @@ require ( ) +require ( + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 v2.2.0 // indirect + github.com/kr/text v0.2.0 // indirect +) require ( github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.0 // indirect diff --git a/go.sum b/go.sum index 4157d383f99..a048f1b13df 100644 --- a/go.sum +++ b/go.sum @@ -71,6 +71,8 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthoriza github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization v1.0.0/go.mod h1:lPneRe3TwsoDRKY4O6YDLXHhEWrD+TIRa8XrV/3/fqw= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v0.6.0 h1:Z5/bDxQL2Zc9t6ZDwdRU60bpLHZvoKOeuaM7XVbf2z0= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v0.6.0/go.mod h1:0FPu3oDRGPvuX1H8TtHJ5XGA0KrXLunomcixR+PQGGA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 v2.2.0 h1:3L+gX5ssCABAToH0VQ64/oNz7rr+ShW+2sB+sonzIlY= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 v2.2.0/go.mod h1:4gUds0dEPFIld6DwHfbo0cLBljyIyI5E5ciPb5MLi3Q= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.0.0 h1:lMW1lD/17LUA5z1XTURo7LcVG2ICBPlyMHjIUrcFZNQ= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.0.0 h1:Jc2KcpCDMu7wJfkrzn7fs/53QMDXH78GuqnH4HOd7zs= 
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.0.0/go.mod h1:PFVgFsclKzPqYRT/BiwpfUN22cab0C7FlgXR3iWpwMo= diff --git a/schemas/v1.0/azure.yaml.json b/schemas/v1.0/azure.yaml.json index b394714073d..cdddcd70de1 100644 --- a/schemas/v1.0/azure.yaml.json +++ b/schemas/v1.0/azure.yaml.json @@ -88,7 +88,8 @@ "appservice", "containerapp", "function", - "staticwebapp" + "staticwebapp", + "aks" ] }, "language": { diff --git a/templates/common/infra/bicep/core/host/aks-agent-pool.bicep b/templates/common/infra/bicep/core/host/aks-agent-pool.bicep new file mode 100644 index 00000000000..24796f0f9ca --- /dev/null +++ b/templates/common/infra/bicep/core/host/aks-agent-pool.bicep @@ -0,0 +1,21 @@ +param clusterName string + +@description('The agent pool name') +param name string + +@description('The agent pool configuration') +param config object + +@description('Custom tags to apply to the AKS resources') +param tags object = {} + +resource aksCluster 'Microsoft.ContainerService/managedClusters@2022-11-02-preview' existing = { + name: clusterName +} + +resource nodePool 'Microsoft.ContainerService/managedClusters/agentPools@2022-11-02-preview' = { + parent: aksCluster + name: name + properties: config + tags: tags +} diff --git a/templates/common/infra/bicep/core/host/aks-managed-cluster.bicep b/templates/common/infra/bicep/core/host/aks-managed-cluster.bicep new file mode 100644 index 00000000000..3303227560e --- /dev/null +++ b/templates/common/infra/bicep/core/host/aks-managed-cluster.bicep @@ -0,0 +1,140 @@ +@description('The name for the AKS managed cluster') +param name string + +@description('The name of the resource group for the managed resources of the AKS cluster') +param nodeResourceGroupName string = '' + +@description('The Azure region/location for the AKS resources') +param location string = resourceGroup().location + +@description('Custom tags to apply to the AKS resources') +param tags object = {} + +@description('Kubernetes 
Version') +param kubernetesVersion string = '1.23.12' + +@description('Whether RBAC is enabled for local accounts') +param enableRbac bool = true + +// Add-ons +@description('Whether web app routing (preview) add-on is enabled') +param webAppRoutingAddon bool = true + +// AAD Integration +@description('Enable Azure Active Directory integration') +param enableAad bool = false + +@description('Enable RBAC using AAD') +param enableAzureRbac bool = false + +@description('The Tenant ID associated to the Azure Active Directory') +param aadTenantId string = '' + +@description('The load balancer SKU to use for ingress into the AKS cluster') +@allowed([ 'basic', 'standard' ]) +param loadBalancerSku string = 'standard' + +@description('Network plugin used for building the Kubernetes network.') +@allowed([ 'azure', 'kubenet', 'none' ]) +param networkPlugin string = 'azure' + +@description('Network policy used for building the Kubernetes network.') +@allowed([ 'azure', 'calico' ]) +param networkPolicy string = 'azure' + +@description('If set to true, getting static credentials will be disabled for this cluster.') +param disableLocalAccounts bool = false + +@description('The managed cluster SKU.') +@allowed([ 'Paid', 'Free' ]) +param sku string = 'Free' + +@description('Configuration of AKS add-ons') +param addOns object = {} + +@description('The log analytics workspace id used for logging & monitoring') +param workspaceId string = '' + +@description('The node pool configuration for the System agent pool') +param systemPoolConfig object + +@description('The DNS prefix to associate with the AKS cluster') +param dnsPrefix string = '' + +resource aks 'Microsoft.ContainerService/managedClusters@2022-11-02-preview' = { + name: name + location: location + tags: tags + identity: { + type: 'SystemAssigned' + } + sku: { + name: 'Basic' + tier: sku + } + properties: { + nodeResourceGroup: !empty(nodeResourceGroupName) ? 
nodeResourceGroupName : 'rg-mc-${name}' + kubernetesVersion: kubernetesVersion + dnsPrefix: empty(dnsPrefix) ? '${name}-dns' : dnsPrefix + enableRBAC: enableRbac + aadProfile: enableAad ? { + managed: true + enableAzureRBAC: enableAzureRbac + tenantID: aadTenantId + } : null + agentPoolProfiles: [ + systemPoolConfig + ] + networkProfile: { + loadBalancerSku: loadBalancerSku + networkPlugin: networkPlugin + networkPolicy: networkPolicy + } + disableLocalAccounts: disableLocalAccounts && enableAad + addonProfiles: addOns + ingressProfile: { + webAppRouting: { + enabled: webAppRoutingAddon + } + } + } +} + +var aksDiagCategories = [ + 'cluster-autoscaler' + 'kube-controller-manager' + 'kube-audit-admin' + 'guard' +] + +// TODO: Update diagnostics to be its own module +// Blocking issue: https://github.com/Azure/bicep/issues/622 +// Unable to pass in a `resource` scope or unable to use string interpolation in resource types +resource diagnostics 'Microsoft.Insights/diagnosticSettings@2021-05-01-preview' = if (!empty(workspaceId)) { + name: 'aks-diagnostics' + tags: tags + scope: aks + properties: { + workspaceId: workspaceId + logs: [for category in aksDiagCategories: { + category: category + enabled: true + }] + metrics: [ + { + category: 'AllMetrics' + enabled: true + } + ] + } +} + +@description('The resource name of the AKS cluster') +output clusterName string = aks.name + +@description('The AKS cluster identity') +output clusterIdentity object = { + clientId: aks.properties.identityProfile.kubeletidentity.clientId + objectId: aks.properties.identityProfile.kubeletidentity.objectId + resourceId: aks.properties.identityProfile.kubeletidentity.resourceId +} diff --git a/templates/common/infra/bicep/core/host/aks.bicep b/templates/common/infra/bicep/core/host/aks.bicep new file mode 100644 index 00000000000..b7b64e5a04c --- /dev/null +++ b/templates/common/infra/bicep/core/host/aks.bicep @@ -0,0 +1,213 @@ +@description('The name for the AKS managed cluster') +param 
name string + +@description('The name for the Azure container registry (ACR)') +param containerRegistryName string + +@description('The name of the connected log analytics workspace') +param logAnalyticsName string = '' + +@description('The name of the keyvault to grant access') +param keyVaultName string + +@description('The Azure region/location for the AKS resources') +param location string = resourceGroup().location + +@description('Custom tags to apply to the AKS resources') +param tags object = {} + +@description('AKS add-ons configuration') +param addOns object = { + azurePolicy: { + enabled: true + config: { + version: 'v2' + } + } + keyVault: { + enabled: true + config: { + enableSecretRotation: 'true' + rotationPollInterval: '2m' + } + } + openServiceMesh: { + enabled: false + config: {} + } + omsAgent: { + enabled: true + config: {} + } + applicationGateway: { + enabled: false + config: {} + } +} + +@allowed([ + 'CostOptimised' + 'Standard' + 'HighSpec' + 'Custom' +]) +@description('The System Pool Preset sizing') +param systemPoolType string = 'CostOptimised' + +@allowed([ + '' + 'CostOptimised' + 'Standard' + 'HighSpec' + 'Custom' +]) +@description('The System Pool Preset sizing') +param agentPoolType string = '' + +// Configure system / user agent pools +@description('Custom configuration of system node pool') +param systemPoolConfig object = {} +@description('Custom configuration of user node pool') +param agentPoolConfig object = {} + +// Configure AKS add-ons +var omsAgentConfig = (!empty(logAnalyticsName) && !empty(addOns.omsAgent) && addOns.omsAgent.enabled) ? union( + addOns.omsAgent, + { + config: { + logAnalyticsWorkspaceResourceID: logAnalytics.id + } + } +) : {} + +var addOnsConfig = union( + (!empty(addOns.azurePolicy) && addOns.azurePolicy.enabled) ? { azurepolicy: addOns.azurePolicy } : {}, + (!empty(addOns.keyVault) && addOns.keyVault.enabled) ? 
{ azureKeyvaultSecretsProvider: addOns.keyVault } : {}, + (!empty(addOns.openServiceMesh) && addOns.openServiceMesh.enabled) ? { openServiceMesh: addOns.openServiceMesh } : {}, + (!empty(addOns.omsAgent) && addOns.omsAgent.enabled) ? { omsagent: omsAgentConfig } : {}, + (!empty(addOns.applicationGateway) && addOns.applicationGateway.enabled) ? { ingressApplicationGateway: addOns.applicationGateway } : {} +) + +// Link to existing log analytics workspace when available +resource logAnalytics 'Microsoft.OperationalInsights/workspaces@2021-12-01-preview' existing = if (!empty(logAnalyticsName)) { + name: logAnalyticsName +} + +var systemPoolSpec = !empty(systemPoolConfig) ? systemPoolConfig : nodePoolPresets[systemPoolType] + +// Create the primary AKS cluster resources and system node pool +module managedCluster 'aks-managed-cluster.bicep' = { + name: 'managed-cluster' + params: { + name: name + location: location + tags: tags + systemPoolConfig: union( + { name: 'npsystem', mode: 'System' }, + nodePoolBase, + systemPoolSpec + ) + addOns: addOnsConfig + workspaceId: !empty(logAnalyticsName) ? logAnalytics.id : '' + } +} + +var hasAgentPool = !empty(agentPoolConfig) || !empty(agentPoolType) +var agentPoolSpec = hasAgentPool && !empty(agentPoolConfig) ? agentPoolConfig : nodePoolPresets[agentPoolType] + +// Create additional user agent pool when specified +module agentPool 'aks-agent-pool.bicep' = if (hasAgentPool) { + name: 'aks-node-pool' + params: { + clusterName: managedCluster.outputs.clusterName + name: 'npuserpool' + config: union({ name: 'npuser', mode: 'User' }, nodePoolBase, agentPoolSpec) + } +} + +// Creates container registry (ACR) +module containerRegistry 'container-registry.bicep' = { + name: 'container-registry' + params: { + name: containerRegistryName + location: location + tags: tags + workspaceId: !empty(logAnalyticsName) ? 
logAnalytics.id : '' + } +} + +// Grant ACR Pull access from cluster managed identity to container registry +module containerRegistryAccess '../security/registry-access.bicep' = { + name: 'cluster-container-registry-access' + params: { + containerRegistryName: containerRegistry.outputs.name + principalId: managedCluster.outputs.clusterIdentity.objectId + } +} + +// Give the AKS Cluster access to KeyVault +module clusterKeyVaultAccess '../security/keyvault-access.bicep' = { + name: 'cluster-keyvault-access' + params: { + keyVaultName: keyVaultName + principalId: managedCluster.outputs.clusterIdentity.objectId + } +} + +// Helpers for node pool configuration +var nodePoolBase = { + osType: 'Linux' + maxPods: 30 + type: 'VirtualMachineScaleSets' + upgradeSettings: { + maxSurge: '33%' + } +} + +var nodePoolPresets = { + CostOptimised: { + vmSize: 'Standard_B4ms' + count: 1 + minCount: 1 + maxCount: 3 + enableAutoScaling: true + availabilityZones: [] + } + Standard: { + vmSize: 'Standard_DS2_v2' + count: 3 + minCount: 3 + maxCount: 5 + enableAutoScaling: true + availabilityZones: [ + '1' + '2' + '3' + ] + } + HighSpec: { + vmSize: 'Standard_D4s_v3' + count: 3 + minCount: 3 + maxCount: 5 + enableAutoScaling: true + availabilityZones: [ + '1' + '2' + '3' + ] + } +} + +// Module outputs +@description('The resource name of the AKS cluster') +output clusterName string = managedCluster.outputs.clusterName + +@description('The AKS cluster identity') +output clusterIdentity object = managedCluster.outputs.clusterIdentity + +@description('The resource name of the ACR') +output containerRegistryName string = containerRegistry.outputs.name + +@description('The login server for the container registry') +output containerRegistryLoginServer string = containerRegistry.outputs.loginServer diff --git a/templates/common/infra/bicep/core/host/container-registry.bicep b/templates/common/infra/bicep/core/host/container-registry.bicep index 01c32139795..dd122166170 100644 --- 
a/templates/common/infra/bicep/core/host/container-registry.bicep +++ b/templates/common/infra/bicep/core/host/container-registry.bicep @@ -15,6 +15,9 @@ param sku object = { } param zoneRedundancy string = 'Disabled' +@description('The log analytics workspace id used for logging & monitoring') +param workspaceId string = '' + // 2022-02-01-preview needed for anonymousPullEnabled resource containerRegistry 'Microsoft.ContainerRegistry/registries@2022-02-01-preview' = { name: name @@ -32,5 +35,34 @@ resource containerRegistry 'Microsoft.ContainerRegistry/registries@2022-02-01-pr } } +// TODO: Update diagnostics to be its own module +// Blocking issue: https://github.com/Azure/bicep/issues/622 +// Unable to pass in a `resource` scope or unable to use string interpolation in resource types +resource diagnostics 'Microsoft.Insights/diagnosticSettings@2021-05-01-preview' = if (!empty(workspaceId)) { + name: 'registry-diagnostics' + tags: tags + scope: containerRegistry + properties: { + workspaceId: workspaceId + logs: [ + { + category: 'ContainerRegistryRepositoryEvents' + enabled: true + } + { + category: 'ContainerRegistryLoginEvents' + enabled: true + } + ] + metrics: [ + { + category: 'AllMetrics' + enabled: true + timeGrain: 'PT1M' + } + ] + } +} + output loginServer string = containerRegistry.properties.loginServer output name string = containerRegistry.name diff --git a/templates/common/infra/bicep/core/security/registry-access.bicep b/templates/common/infra/bicep/core/security/registry-access.bicep new file mode 100644 index 00000000000..056bd6c32ed --- /dev/null +++ b/templates/common/infra/bicep/core/security/registry-access.bicep @@ -0,0 +1,18 @@ +param containerRegistryName string +param principalId string + +var acrPullRole = subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '7f951dda-4ed3-4680-a7ca-43fe172d538d') + +resource aksAcrPull 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + scope: containerRegistry // Use when 
specifying a scope that is different than the deployment scope + name: guid(principalId, 'Acr', acrPullRole) + properties: { + roleDefinitionId: acrPullRole + principalType: 'ServicePrincipal' + principalId: principalId + } +} + +resource containerRegistry 'Microsoft.ContainerRegistry/registries@2022-02-01-preview' existing = { + name: containerRegistryName +} diff --git a/templates/cspell-templates.txt b/templates/cspell-templates.txt index 9792d2a6dc3..403d7afed3a 100644 --- a/templates/cspell-templates.txt +++ b/templates/cspell-templates.txt @@ -54,6 +54,7 @@ uuidv venv virtuals VSIX +webapprouting webfonts webui wwwroot diff --git a/templates/todo/projects/nodejs-mongo-aks/.repo/bicep/azure.yaml b/templates/todo/projects/nodejs-mongo-aks/.repo/bicep/azure.yaml new file mode 100644 index 00000000000..c80261db3b4 --- /dev/null +++ b/templates/todo/projects/nodejs-mongo-aks/.repo/bicep/azure.yaml @@ -0,0 +1,18 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/Azure/azure-dev/main/schemas/v1.0/azure.yaml.json + +name: todo-nodejs-mongo-aks +metadata: + template: todo-nodejs-mongo-aks@0.0.1-beta +services: + web: + project: ../../web/react-fluent + dist: build + language: js + host: aks + api: + project: ../../api/js + language: js + host: aks + k8s: + ingress: + relativePath: api \ No newline at end of file diff --git a/templates/todo/projects/nodejs-mongo-aks/.repo/bicep/infra/main.bicep b/templates/todo/projects/nodejs-mongo-aks/.repo/bicep/infra/main.bicep new file mode 100644 index 00000000000..9014e81e088 --- /dev/null +++ b/templates/todo/projects/nodejs-mongo-aks/.repo/bicep/infra/main.bicep @@ -0,0 +1,113 @@ +targetScope = 'subscription' + +@minLength(1) +@maxLength(64) +@description('Name of the the environment which is used to generate a short unique hash used in all resources.') +param environmentName string + +@minLength(1) +@description('Primary location for all resources') +param location string + +// Optional parameters to 
override the default azd resource naming conventions. Update the main.parameters.json file to provide values. e.g.,: +// "resourceGroupName": { +// "value": "myGroupName" +// } +@description('The resource name of the AKS cluster') +param clusterName string = '' + +@description('The resource name of the Container Registry (ACR)') +param containerRegistryName string = '' + +param applicationInsightsDashboardName string = '' +param applicationInsightsName string = '' +param cosmosAccountName string = '' +param cosmosDatabaseName string = '' +param keyVaultName string = '' +param logAnalyticsName string = '' +param resourceGroupName string = '' + +@description('Id of the user or app to assign application roles') +param principalId string = '' + +var abbrs = loadJsonContent('../../../../../../common/infra/bicep/abbreviations.json') +var resourceToken = toLower(uniqueString(subscription().id, environmentName, location)) +var tags = { 'azd-env-name': environmentName } + +// Organize resources in a resource group +resource rg 'Microsoft.Resources/resourceGroups@2021-04-01' = { + name: !empty(resourceGroupName) ? resourceGroupName : '${abbrs.resourcesResourceGroups}${environmentName}' + location: location + tags: tags +} + +// The AKS cluster to host applications +module aks '../../../../../../common/infra/bicep/core/host/aks.bicep' = { + name: 'aks' + scope: rg + params: { + location: location + name: !empty(clusterName) ? clusterName : '${abbrs.containerServiceManagedClusters}${resourceToken}' + agentPoolType: 'Standard' + containerRegistryName: !empty(containerRegistryName) ? containerRegistryName : '${abbrs.containerRegistryRegistries}${resourceToken}' + logAnalyticsName: monitoring.outputs.logAnalyticsWorkspaceName + keyVaultName: keyVault.outputs.name + } +} + +// The application database +module cosmos '../../../../../common/infra/bicep/app/cosmos-mongo-db.bicep' = { + name: 'cosmos' + scope: rg + params: { + accountName: !empty(cosmosAccountName) ? 
cosmosAccountName : '${abbrs.documentDBDatabaseAccounts}${resourceToken}' + databaseName: cosmosDatabaseName + location: location + tags: tags + keyVaultName: keyVault.outputs.name + } +} + +// Store secrets in a keyvault +module keyVault '../../../../../../common/infra/bicep/core/security/keyvault.bicep' = { + name: 'keyvault' + scope: rg + params: { + name: !empty(keyVaultName) ? keyVaultName : '${abbrs.keyVaultVaults}${resourceToken}' + location: location + tags: tags + principalId: principalId + } +} + +// Monitor application with Azure Monitor +module monitoring '../../../../../../common/infra/bicep/core/monitor/monitoring.bicep' = { + name: 'monitoring' + scope: rg + params: { + location: location + tags: tags + logAnalyticsName: !empty(logAnalyticsName) ? logAnalyticsName : '${abbrs.operationalInsightsWorkspaces}${resourceToken}' + applicationInsightsName: !empty(applicationInsightsName) ? applicationInsightsName : '${abbrs.insightsComponents}${resourceToken}' + applicationInsightsDashboardName: !empty(applicationInsightsDashboardName) ? 
applicationInsightsDashboardName : '${abbrs.portalDashboards}${resourceToken}' + } +} + +// Data outputs +output AZURE_COSMOS_CONNECTION_STRING_KEY string = cosmos.outputs.connectionStringKey +output AZURE_COSMOS_DATABASE_NAME string = cosmos.outputs.databaseName + +// App outputs +output APPLICATIONINSIGHTS_CONNECTION_STRING string = monitoring.outputs.applicationInsightsConnectionString +output AZURE_KEY_VAULT_ENDPOINT string = keyVault.outputs.endpoint +output AZURE_KEY_VAULT_NAME string = keyVault.outputs.name +output AZURE_LOCATION string = location +output AZURE_TENANT_ID string = tenant().tenantId +output AZURE_AKS_CLUSTER_NAME string = aks.outputs.clusterName +output AZURE_AKS_IDENTITY_CLIENT_ID string = aks.outputs.clusterIdentity.clientId +output AZURE_CONTAINER_REGISTRY_ENDPOINT string = aks.outputs.containerRegistryLoginServer +output AZURE_CONTAINER_REGISTRY_NAME string = aks.outputs.containerRegistryName +output REACT_APP_API_BASE_URL string = '' +output REACT_APP_APPLICATIONINSIGHTS_CONNECTION_STRING string = monitoring.outputs.applicationInsightsConnectionString +output REACT_APP_WEB_BASE_URL string = '' +output SERVICE_API_ENDPOINTS array = [] diff --git a/templates/todo/projects/nodejs-mongo-aks/.repo/bicep/infra/main.parameters.json b/templates/todo/projects/nodejs-mongo-aks/.repo/bicep/infra/main.parameters.json new file mode 100644 index 00000000000..67ad8524c44 --- /dev/null +++ b/templates/todo/projects/nodejs-mongo-aks/.repo/bicep/infra/main.parameters.json @@ -0,0 +1,15 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "environmentName": { + "value": "${AZURE_ENV_NAME}" + }, + "location": { + "value": "${AZURE_LOCATION}" + }, + "principalId": { + "value": "${AZURE_PRINCIPAL_ID}" + } + } +} \ No newline at end of file diff --git a/templates/todo/projects/nodejs-mongo-aks/.repo/bicep/repo.yaml 
b/templates/todo/projects/nodejs-mongo-aks/.repo/bicep/repo.yaml new file mode 100644 index 00000000000..304c57c88a0 --- /dev/null +++ b/templates/todo/projects/nodejs-mongo-aks/.repo/bicep/repo.yaml @@ -0,0 +1,185 @@ +templateApi: 1.0.0 +metadata: + type: repo + name: todo-nodejs-mongo-aks + description: ToDo Application with a Node.js API and Azure Cosmos DB API for MongoDB hosted in AKS + +repo: + includeProjectAssets: false + + remotes: + - name: azure-samples-main + url: git@github.com:Azure-Samples/todo-nodejs-mongo-aks.git + - name: azure-samples-staging + url: git@github.com:Azure-Samples/todo-nodejs-mongo-aks.git + branch: staging + + rewrite: + rules: + - from: ../../../../../../common/infra/bicep/core + to: ./core + patterns: + - "**/*.bicep" + + - from: ../../../../../common/infra/bicep/app + to: ./app + patterns: + - "**/*.bicep" + + - from: ../../../../../common/infra/bicep/core + to: ../core + patterns: + - "**/*.bicep" + + # app service modules + - from: ../../../../../../common/infra/bicep + to: ../ + patterns: + - "**/*.bicep" + ignore: + - "**/main.bicep" + + # main.bicep + - from: ../../../../../../common/infra/bicep + to: ./ + patterns: + - "**/main.bicep" + + - from: ../../api/js + to: ./src/api + patterns: + - "**/azure.@(yml|yaml)" + + - from: ../../web/react-fluent + to: ./src/web + patterns: + - "**/azure.@(yml|yaml)" + + - from: web-appservice.bicep + to: web.bicep + patterns: + - "**/main.bicep" + + - from: api-appservice-node.bicep + to: api.bicep + patterns: + - "**/main.bicep" + + - from: cosmos-mongo-db.bicep + to: db.bicep + patterns: + - "**/main.bicep" + + - from: "$PLACEHOLDERIACTOOLS" + to: "" + patterns: + - "README.md" + + - from: "$PLACEHOLDER_TITLE" + to: "ToDo Application with a Node.js API and Azure Cosmos DB API for MongoDB on Azure Kubernetes Service (AKS)" + patterns: + - "README.md" + + - from: "$PLACEHOLDER_DESCRIPTION" + to: "using Bicep as the IaC provider" + patterns: + - "README.md" + + - from: 
../../../../api/common/openapi.yaml + to: ../../src/api/openapi.yaml + patterns: + - "apim-api.bicep" + + assets: + # Common assets + + # Infra + - from: ./infra/ + to: ./infra + + - from: ../../../../common/infra/bicep/app/cosmos-mongo-db.bicep + to: ./infra/app/db.bicep + + - from: ./../../ + to: ./ + ignore: + - ".repo/**/*" + - "repo.y[a]ml" + - "azure.y[a]ml" + + # openapi.yaml to root + - from: ../../../../api/common + to: ./ + patterns: + - openapi.yaml + + # openapi.yaml to api root + - from: ../../../../api/common + to: ./src/api + patterns: + - openapi.yaml + + # Templates common + - from: ../../../../../common + to: ./ + ignore: + - .github/**/* + - .devcontainer/**/* + - "infra/**/*" + - .azdo/pipelines/*/azure-dev.yml + + # AzDo workflows for bicep + - from: ../../../../../common/.azdo/pipelines/bicep/azure-dev.yml + to: ./.azdo/pipelines/azure-dev.yml + + # Github workflows for bicep + - from: ../../../../../common/.github/workflows/bicep + to: ./.github/workflows + + # azd core modules + - from: ../../../../../common/infra/bicep + to: ./infra + + # .devcontainer common (devcontainer.json) + - from: ../../../../../common/.devcontainer/devcontainer.json/nodejs/devcontainer.json + to: ./.devcontainer/devcontainer.json + + # .devcontainer common (Dockerfile) + - from: ../../../../../common/.devcontainer/Dockerfile/base + to: ./.devcontainer + + # Assets common + - from: ../../../../common/assets + to: ./assets + + # Tests common + - from: ../../../../common/tests + to: ./tests + + # Auth JS common + - from: ../../../../common/auth/js + to: ./src/api/src + + # Node JS API + - from: ../../../../api/js + to: ./src/api + ignore: + - "dist/**/*" + - "coverage/**/*" + - "node_modules/**/*" + - "**/*.log" + + # React Frontend + - from: ../../../../web/react-fluent + to: ./src/web + ignore: + - "build/**/*" + - "node_modules/**/*" + + # Infra + - from: ./infra/ + to: ./infra + + # Azure.yml + - from: ./azure.yaml + to: ./azure.yaml diff --git 
a/templates/todo/projects/nodejs-mongo-aks/.vscode/launch.json b/templates/todo/projects/nodejs-mongo-aks/.vscode/launch.json new file mode 100644 index 00000000000..709bf3d1c0f --- /dev/null +++ b/templates/todo/projects/nodejs-mongo-aks/.vscode/launch.json @@ -0,0 +1,47 @@ +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. + // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "name": "Debug Web", + "request": "launch", + "type": "msedge", + "webRoot": "${workspaceFolder}/src/web/src", + "url": "http://localhost:3000", + "sourceMapPathOverrides": { + "webpack:///src/*": "${webRoot}/*" + }, + }, + + { + "name": "Debug API", + "request": "launch", + "runtimeArgs": [ + "run", + "start" + ], + "runtimeExecutable": "npm", + "skipFiles": [ + "/**" + ], + "type": "pwa-node", + "cwd": "${workspaceFolder}/src/api", + "envFile": "${input:dotEnvFilePath}", + "env": { + "NODE_ENV": "development" + }, + "preLaunchTask": "Restore API", + "outputCapture": "std" + }, + ], + + "inputs": [ + { + "id": "dotEnvFilePath", + "type": "command", + "command": "azure-dev.commands.getDotEnvFilePath" + } + ] +} diff --git a/templates/todo/projects/nodejs-mongo-aks/.vscode/tasks.json b/templates/todo/projects/nodejs-mongo-aks/.vscode/tasks.json new file mode 100644 index 00000000000..935126d74d7 --- /dev/null +++ b/templates/todo/projects/nodejs-mongo-aks/.vscode/tasks.json @@ -0,0 +1,92 @@ +{ + "version": "2.0.0", + "tasks": [ + { + "label": "Start Web", + "type": "dotenv", + "targetTasks": [ + "Restore Web", + "Web npm start" + ], + "file": "${input:dotEnvFilePath}" + }, + { + "label": "Restore Web", + "type": "shell", + "command": "azd restore --service web", + "presentation": { + "reveal": "silent" + }, + "problemMatcher": [] + }, + { + "label": "Web npm start", + "detail": "Helper task--use 'Start Web' task to ensure environment is set up 
correctly", + "type": "shell", + "command": "npm run start", + "options": { + "cwd": "${workspaceFolder}/src/web/", + "env": { + "REACT_APP_API_BASE_URL": "http://localhost:3100", + "BROWSER": "none" + } + }, + "presentation": { + "panel": "dedicated", + }, + "problemMatcher": [] + }, + + { + "label": "Start API", + "type": "dotenv", + "targetTasks": [ + "Restore API", + "API npm start" + ], + "file": "${input:dotEnvFilePath}" + }, + { + "label": "Restore API", + "type": "shell", + "command": "azd restore --service api", + "presentation": { + "reveal": "silent" + }, + "problemMatcher": [] + }, + { + "label": "API npm start", + "detail": "Helper task--use 'Start API' task to ensure environment is set up correctly", + "type": "shell", + "command": "npm run start", + "options": { + "cwd": "${workspaceFolder}/src/api/", + "env": { + "NODE_ENV": "development" + } + }, + "presentation": { + "panel": "dedicated", + }, + "problemMatcher": [] + }, + + { + "label": "Start API and Web", + "dependsOn":[ + "Start API", + "Start Web" + ], + "problemMatcher": [] + } + ], + + "inputs": [ + { + "id": "dotEnvFilePath", + "type": "command", + "command": "azure-dev.commands.getDotEnvFilePath" + } + ] +} diff --git a/templates/todo/projects/nodejs-mongo-aks/README.md b/templates/todo/projects/nodejs-mongo-aks/README.md new file mode 100644 index 00000000000..2e52b81b3de --- /dev/null +++ b/templates/todo/projects/nodejs-mongo-aks/README.md @@ -0,0 +1,210 @@ +# ToDo Application with a Node.js API and Azure Cosmos DB API for MongoDB on Azure Kubernetes Service (AKS) + +[![Open in Remote - Containers](https://img.shields.io/static/v1?label=Remote%20-%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/azure-samples/todo-nodejs-mongo-aks) + +A complete ToDo application that includes everything you need to build, deploy, and monitor an Azure solution. 
This application uses the Azure Developer CLI (azd) to get you up and running on Azure quickly, React.js for the Web application, Node.js for the API, Azure Cosmos DB API for MongoDB for storage, and Azure Monitor for monitoring and logging. It includes application code, tools, and pipelines that serve as a foundation from which you can build upon and customize when creating your own solutions. + +Let's jump in and get the ToDo app up and running in Azure. When you are finished, you will have a fully functional web app deployed on Azure. In later steps, you'll see how to set up a pipeline and monitor the application. + +Screenshot of deployed ToDo app + +Screenshot of the deployed ToDo app + +### Prerequisites + +The following prerequisites are required to use this application. Please ensure that you have them all installed locally. + +- [Azure Developer CLI](https://aka.ms/azure-dev/install) + - Windows: + ```powershell + powershell -ex AllSigned -c "Invoke-RestMethod 'https://aka.ms/install-azd.ps1' | Invoke-Expression" + ``` + - Linux/MacOS: + ``` + curl -fsSL https://aka.ms/install-azd.sh | bash + ``` +- [Azure CLI (2.38.0+)](https://docs.microsoft.com/cli/azure/install-azure-cli) +- [Node.js with npm (16.13.1+)](https://nodejs.org/) - for API backend and Web frontend +- [Git (2.36.1+)](https://git-scm.com/) +$PLACEHOLDERIACTOOLS + +### Quickstart + +The fastest way for you to get this application up and running on Azure is to use the `azd up` command. This single command will create and configure all necessary Azure resources - including access policies and roles for your account and service-to-service communication with Managed Identities. + +1. Open a terminal, create a new empty folder, and change into it. +1. Run the following command to initialize the project, provision Azure resources, and deploy the application code. 
+ +```bash +azd up --template todo-nodejs-mongo-aks +``` + +You will be prompted for the following information: + +- `Environment Name`: This will be used as a prefix for the resource group that will be created to hold all Azure resources. This name should be unique within your Azure subscription. +- `Azure Location`: The Azure location where your resources will be deployed. +- `Azure Subscription`: The Azure Subscription where your resources will be deployed. + +> NOTE: This may take a while to complete as it executes three commands: `azd init` (initializes environment), `azd provision` (provisions Azure resources), and `azd deploy` (deploys application code). You will see a progress indicator as it provisions and deploys your application. + +When `azd up` is complete it will output the following URLs: + +- Azure Portal link to view resources +- ToDo Web application frontend +- ToDo API application + +!["azd up output"](assets/urls.png) + +Click the web application URL to launch the ToDo app. Create a new collection and add some items. This will create monitoring activity in the application that you will be able to see later when you run `azd monitor`. + +> NOTE: +> +> - The `azd up` command will create Azure resources that will incur costs to your Azure subscription. You can clean up those resources manually via the Azure portal or with the `azd down` command. +> - You can call `azd up` as many times as you like to both provision and deploy your solution, but you only need to provide the `--template` parameter the first time you call it to get the code locally. Subsequent `azd up` calls do not require the template parameter. If you do provide the parameter, all your local source code will be overwritten if you agree to overwrite when prompted. +> - You can always create a new environment with `azd env new`. 
+ +### Application Architecture + +This application utilizes the following Azure resources: + +- [**Azure Kubernetes Service (AKS)**](https://docs.microsoft.com/azure/aks) to host the Web frontend and API backend +- [**Azure Cosmos DB API for MongoDB**](https://docs.microsoft.com/azure/cosmos-db/mongodb/mongodb-introduction) for storage +- [**Azure Monitor**](https://docs.microsoft.com/azure/azure-monitor/) for monitoring and logging +- [**Azure Key Vault**](https://docs.microsoft.com/azure/key-vault/) for securing secrets + +Here's a high level architecture diagram that illustrates these components. Notice that these are all contained within a single [resource group](https://docs.microsoft.com/azure/azure-resource-manager/management/manage-resource-groups-portal), that will be created for you when you create the resources. + +Application architecture diagram + +> This template provisions resources to an Azure subscription that you will select upon provisioning them. Please refer to the [Pricing calculator for Microsoft Azure](https://azure.microsoft.com/pricing/calculator/) and, if needed, update the included Azure resource definitions found in `infra/main.bicep` to suit your needs. + +### Application Code + +The repo is structured to follow the [Azure Developer CLI](https://aka.ms/azure-dev/overview) conventions including: + +- **Source Code**: All application source code is located in the `src` folder. +- **Infrastructure as Code**: All application "infrastructure as code" files are located in the `infra` folder. +- **Azure Developer Configuration**: An `azure.yaml` file located in the root that ties the application source code to the Azure services defined in your "infrastructure as code" files. +- **GitHub Actions**: A sample GitHub action file is located in the `.github/workflows` folder. +- **VS Code Configuration**: All VS Code configuration to run and debug the application is located in the `.vscode` folder. 
+ +### Azure Subscription + +This template will create infrastructure and deploy code to Azure. If you don't have an Azure Subscription, you can sign up for a [free account here](https://azure.microsoft.com/free/). + +### Azure Developer CLI - VS Code Extension + +The Azure Developer experience includes an Azure Developer CLI VS Code Extension that mirrors all of the Azure Developer CLI commands into the `azure.yaml` context menu and command palette options. If you are a VS Code user, then we highly recommend installing this extension for the best experience. + +Here's how to install it: + +#### VS Code + +1. Click on the "Extensions" tab in VS Code +1. Search for "Azure Developer CLI" - authored by Microsoft +1. Click "Install" + +#### Marketplace + +1. Go to the [Azure Developer CLI - VS Code Extension](https://marketplace.visualstudio.com/items?itemName=ms-azuretools.azure-dev) page +1. Click "Install" + +Once the extension is installed, you can press `F1`, and type "Azure Developer CLI" to see all of your available options. You can also right click on your project's `azure.yaml` file for a list of commands. + +### Next Steps + +At this point, you have a complete application deployed on Azure. But there is much more that the Azure Developer CLI can do. These next steps will introduce you to additional commands that will make creating applications on Azure much easier. Using the Azure Developer CLI, you can setup your pipelines, monitor your application, test and debug locally. + +#### Set up a pipeline using `azd pipeline` + +This template includes a GitHub Actions pipeline configuration file that will deploy your application whenever code is pushed to the main branch. You can find that pipeline file here: `.github/workflows`. + +Setting up this pipeline requires you to give GitHub permission to deploy to Azure on your behalf, which is done via a Service Principal stored in a GitHub secret named `AZURE_CREDENTIALS`. 
The `azd pipeline config` command will automatically create a service principal for you. The command also helps to create a private GitHub repository and pushes code to the newly created repo. + +Before you call the `azd pipeline config` command, you'll need to install the following: + +- [GitHub CLI (2.3+)](https://github.com/cli/cli) + +Run the following command to set up a GitHub Action: + +```bash +azd pipeline config +``` + +> Support for Azure DevOps Pipelines is coming soon to `azd pipeline config`. In the meantime, you can follow the instructions found here: [.azdo/pipelines/README.md](./.azdo/pipelines/README.md) to set it up manually. + +#### Monitor the application using `azd monitor` + +To help with monitoring applications, the Azure Dev CLI provides a `monitor` command to help you get to the various Application Insights dashboards. + +- Run the following command to open the "Overview" dashboard: + + ```bash + azd monitor --overview + ``` + +- Live Metrics Dashboard + + Run the following command to open the "Live Metrics" dashboard: + + ```bash + azd monitor --live + ``` + +- Logs Dashboard + + Run the following command to open the "Logs" dashboard: + + ```bash + azd monitor --logs + ``` + +#### Run and Debug Locally + +The easiest way to run and debug is to leverage the Azure Developer CLI Visual Studio Code Extension. Refer to this [walk-through](https://aka.ms/azure-dev/vscode) for more details. + +#### Clean up resources + +When you are done, you can delete all the Azure resources created with this template by running the following command: + +```bash +azd down +``` + +### Additional azd commands + +The Azure Developer CLI includes many other commands to help with your Azure development experience. You can view these commands at the terminal by running `azd help`. You can also view the full list of commands on our [Azure Developer CLI command](https://aka.ms/azure-dev/ref) page. + +## Troubleshooting/Known issues + +Sometimes, things go awry. 
If you happen to run into issues, then please review our ["Known Issues"](https://aka.ms/azure-dev/knownissues) page for help. If you continue to have issues, then please file an issue in our main [Azure Dev](https://aka.ms/azure-dev/issues) repository. + +## Security + +### Roles + +This template creates a [managed identity](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview) for your app inside your Azure Active Directory tenant, and it is used to authenticate your app with Azure and other services that support Azure AD authentication like Key Vault via access policies. You will see principalId referenced in the infrastructure as code files, that refers to the id of the currently logged in Azure CLI user, which will be granted access policies and permissions to run the application locally. To view your managed identity in the Azure Portal, follow these [steps](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-view-managed-identity-service-principal-portal). + +### Key Vault + +This template uses [Azure Key Vault](https://docs.microsoft.com/azure/key-vault/general/overview) to securely store your Cosmos DB connection string for the provisioned Cosmos DB account. Key Vault is a cloud service for securely storing and accessing secrets (API keys, passwords, certificates, cryptographic keys) and makes it simple to give other Azure services access to them. As you continue developing your solution, you may add as many secrets to your Key Vault as you require. + +## Uninstall + +To uninstall the Azure Developer CLI: + +Windows: + +``` +powershell -ex AllSigned -c "Invoke-RestMethod 'https://aka.ms/uninstall-azd.ps1' | Invoke-Expression" +``` + +Linux/MacOS: + +``` +curl -fsSL https://aka.ms/uninstall-azd.sh | bash +``` + +## Reporting Issues and Feedback + +If you have any feature requests, issues, or areas for improvement, please [file an issue](https://aka.ms/azure-dev/issues). 
To keep up-to-date, ask questions, or share suggestions, join our [GitHub Discussions](https://aka.ms/azure-dev/discussions). You may also contact us via AzDevTeam@microsoft.com. diff --git a/templates/todo/projects/nodejs-mongo-aks/assets/resources.png b/templates/todo/projects/nodejs-mongo-aks/assets/resources.png new file mode 100644 index 00000000000..1f2c85a4693 Binary files /dev/null and b/templates/todo/projects/nodejs-mongo-aks/assets/resources.png differ diff --git a/templates/todo/projects/nodejs-mongo-aks/src/api/manifests/deployment.yaml b/templates/todo/projects/nodejs-mongo-aks/src/api/manifests/deployment.yaml new file mode 100644 index 00000000000..809f242de05 --- /dev/null +++ b/templates/todo/projects/nodejs-mongo-aks/src/api/manifests/deployment.yaml @@ -0,0 +1,34 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: todo-api +spec: + replicas: 2 + selector: + matchLabels: + app: todo-api + template: + metadata: + labels: + app: todo-api + spec: + containers: + - name: todo-api + image: ${SERVICE_API_IMAGE_NAME} + ports: + - containerPort: 3100 + env: + - name: AZURE_CLIENT_ID + value: ${AZURE_AKS_IDENTITY_CLIENT_ID} + - name: AZURE_KEY_VAULT_ENDPOINT + valueFrom: + secretKeyRef: + name: azd + key: AZURE_KEY_VAULT_ENDPOINT + optional: false + - name: APPLICATIONINSIGHTS_CONNECTION_STRING + valueFrom: + secretKeyRef: + name: azd + key: APPLICATIONINSIGHTS_CONNECTION_STRING + optional: false diff --git a/templates/todo/projects/nodejs-mongo-aks/src/api/manifests/ingress.yaml b/templates/todo/projects/nodejs-mongo-aks/src/api/manifests/ingress.yaml new file mode 100644 index 00000000000..c2baf8239ac --- /dev/null +++ b/templates/todo/projects/nodejs-mongo-aks/src/api/manifests/ingress.yaml @@ -0,0 +1,19 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: todo-ingress-api + annotations: + nginx.ingress.kubernetes.io/use-regex: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$2 +spec: + ingressClassName: 
webapprouting.kubernetes.azure.com + rules: + - http: + paths: + - path: /api(/|$)(.*) + pathType: Prefix + backend: + service: + name: todo-api + port: + number: 80 diff --git a/templates/todo/projects/nodejs-mongo-aks/src/api/manifests/service.yaml b/templates/todo/projects/nodejs-mongo-aks/src/api/manifests/service.yaml new file mode 100644 index 00000000000..a1622390d2b --- /dev/null +++ b/templates/todo/projects/nodejs-mongo-aks/src/api/manifests/service.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: todo-api +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 3100 + selector: + app: todo-api diff --git a/templates/todo/projects/nodejs-mongo-aks/src/web/manifests/deployment.yaml b/templates/todo/projects/nodejs-mongo-aks/src/web/manifests/deployment.yaml new file mode 100644 index 00000000000..b64f541c77f --- /dev/null +++ b/templates/todo/projects/nodejs-mongo-aks/src/web/manifests/deployment.yaml @@ -0,0 +1,28 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: todo-web +spec: + replicas: 2 + selector: + matchLabels: + app: todo-web + template: + metadata: + labels: + app: todo-web + spec: + containers: + - name: todo-web + image: ${SERVICE_WEB_IMAGE_NAME} + ports: + - containerPort: 3000 + env: + - name: REACT_APP_API_BASE_URL + value: /api + - name: REACT_APP_APPLICATIONINSIGHTS_CONNECTION_STRING + valueFrom: + secretKeyRef: + name: azd + key: APPLICATIONINSIGHTS_CONNECTION_STRING + optional: false diff --git a/templates/todo/projects/nodejs-mongo-aks/src/web/manifests/ingress.yaml b/templates/todo/projects/nodejs-mongo-aks/src/web/manifests/ingress.yaml new file mode 100644 index 00000000000..5676d5a9e01 --- /dev/null +++ b/templates/todo/projects/nodejs-mongo-aks/src/web/manifests/ingress.yaml @@ -0,0 +1,16 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: todo-ingress-web +spec: + ingressClassName: webapprouting.kubernetes.azure.com + rules: + - http: + paths: + - path: / + pathType: Prefix 
+ backend: + service: + name: todo-web + port: + number: 80 diff --git a/templates/todo/projects/nodejs-mongo-aks/src/web/manifests/service.yaml b/templates/todo/projects/nodejs-mongo-aks/src/web/manifests/service.yaml new file mode 100644 index 00000000000..ff91f4ba4b9 --- /dev/null +++ b/templates/todo/projects/nodejs-mongo-aks/src/web/manifests/service.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Service +metadata: + name: todo-web +spec: + type: ClusterIP + ports: + - port: 80 + selector: + app: todo-web