diff --git a/.travis.yml b/.travis.yml
index 0b1b1a426..a99cd038b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -14,10 +14,6 @@ before_install:
 - source ${GOPATH}/src/github.com/pulumi/scripts/ci/prepare-environment.sh
 - source ${PULUMI_SCRIPTS}/ci/keep-failed-tests.sh
 - sudo apt-get update && sudo apt-get install -y apt-transport-https
-- curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
-- echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list
-- sudo apt-get update
-- sudo apt-get install -y kubectl
 install:
 - source ${PULUMI_SCRIPTS}/ci/install-common-toolchain.sh
 - curl -L https://get.pulumi.com/ | bash
@@ -32,10 +28,15 @@ install:
   | bash
 - helm init -c
 - helm repo add bitnami https://charts.bitnami.com/bitnami
+# Install aws-iam-authenticator
+# See: https://docs.aws.amazon.com/eks/latest/userguide/install-aws-iam-authenticator.html
 - curl -o aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.13.7/2019-06-11/bin/linux/amd64/aws-iam-authenticator
 - chmod +x ./aws-iam-authenticator
-- mkdir -p $HOME/bin && cp ./aws-iam-authenticator $HOME/bin/aws-iam-authenticator
--
+- sudo mv aws-iam-authenticator /usr/local/bin
+# Install kubectl
+- curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
+- chmod +x ./kubectl
+- sudo mv kubectl /usr/local/bin
 before_script:
 - "${PULUMI_SCRIPTS}/ci/ensure-dependencies"
 after_failure:
diff --git a/aws-ts-eks-migrate-nodegroups/Pulumi.yaml b/aws-ts-eks-migrate-nodegroups/Pulumi.yaml
new file mode 100644
index 000000000..7305e570a
--- /dev/null
+++ b/aws-ts-eks-migrate-nodegroups/Pulumi.yaml
@@ -0,0 +1,3 @@
+name: aws-ts-eks-migrate-nodegroups
+description: Creates an EKS cluster with node groups and a workload, then shows how to add an additional node group and use it to migrate the workload with zero downtime.
+runtime: nodejs
diff --git a/aws-ts-eks-migrate-nodegroups/README.md b/aws-ts-eks-migrate-nodegroups/README.md
new file mode 100755
index 000000000..115a2df8b
--- /dev/null
+++ b/aws-ts-eks-migrate-nodegroups/README.md
@@ -0,0 +1,4 @@
+# examples/aws-ts-eks-migrate-nodegroups
+
+Creates an EKS cluster with node groups and a workload, and showcases how to add an
+additional node group to use for workload migration with zero downtime.
diff --git a/aws-ts-eks-migrate-nodegroups/echoserver.ts b/aws-ts-eks-migrate-nodegroups/echoserver.ts
new file mode 100644
index 000000000..bfccc16da
--- /dev/null
+++ b/aws-ts-eks-migrate-nodegroups/echoserver.ts
@@ -0,0 +1,161 @@
+import * as eks from "@pulumi/eks";
+import * as k8s from "@pulumi/kubernetes";
+import * as pulumi from "@pulumi/pulumi";
+
+// Create the echoserver workload's Service, Deployment and Ingress.
+interface EchoserverArgs {
+ replicas: pulumi.Input<number>;
+ namespace: pulumi.Input<string>;
+ ingressClass: pulumi.Input<string>;
+ provider: k8s.Provider;
+}
+export function create(
+ name: string,
+ args: EchoserverArgs,
+): k8s.core.v1.Service {
+
+ const labels = {app: name};
+
+ // Create the Service.
+ const service = createService(name, {
+ labels: labels,
+ namespace: args.namespace,
+ provider: args.provider,
+ });
+ const serviceName = service.metadata.name;
+
+ // Deploy the echoserver on the standard node group.
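+ // Its Pods define no nodeSelector or tolerations, so the scheduler places
+ // them on the untainted standard nodes rather than the tainted NGINX nodes.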
+ const deployment = createDeployment(name, { + replicas: args.replicas, + labels: labels, + namespace: args.namespace, + provider: args.provider, + }); + + // Create the Ingress. + const ingress = createIngress(name, { + labels: labels, + namespace: args.namespace, + ingressClass: args.ingressClass, + serviceName: serviceName, + provider: args.provider, + }); + + return service; +} + +interface EchoserverServiceArgs { + labels: pulumi.Input; + namespace: pulumi.Input; + provider: k8s.Provider; +} +export function createService( + name: string, + args: EchoserverServiceArgs, +): k8s.core.v1.Service { + return new k8s.core.v1.Service( + name, + { + metadata: { + labels: args.labels, + namespace: args.namespace, + }, + spec: { + type: "ClusterIP", + ports: [{port: 80, protocol: "TCP", targetPort: "http"}], + selector: args.labels, + }, + }, + { + provider: args.provider, + }, + ); +} + +interface EchoserverDeploymentArgs { + replicas: pulumi.Input; + labels: pulumi.Input; + namespace: pulumi.Input; + provider: k8s.Provider; +} +export function createDeployment( + name: string, + args: EchoserverDeploymentArgs, +): k8s.apps.v1.Deployment { + return new k8s.apps.v1.Deployment(name, + { + metadata: { + labels: args.labels, + namespace: args.namespace, + }, + spec: { + replicas: args.replicas, + selector: { matchLabels: args.labels }, + template: { + metadata: { labels: args.labels, namespace: args.namespace }, + spec: { + restartPolicy: "Always", + containers: [ + { + name: name, + image: "gcr.io/google-containers/echoserver:1.5", + ports: [{ name: "http", containerPort: 8080 }], + }, + ], + }, + }, + }, + }, + { + provider: args.provider, + }, + ); +} + +interface EchoserverIngressArgs { + labels: pulumi.Input; + namespace: pulumi.Input; + ingressClass: pulumi.Input; + serviceName: pulumi.Input; + provider: k8s.Provider; +} +export function createIngress( + name: string, + args: EchoserverIngressArgs, +): k8s.extensions.v1beta1.Ingress { + // TODO(metral): change to k8s.networking.v1beta.Ingress + // when EKS supports >= 1.14. + return new k8s.extensions.v1beta1.Ingress( + name, + { + metadata: { + labels: args.labels, + namespace: args.namespace, + annotations: { + "kubernetes.io/ingress.class": args.ingressClass, + }, + }, + spec: { + rules: [ + { + host: "apps.example.com", + http: { + paths: [ + { + path: "/echoserver", + backend: { + serviceName: args.serviceName, + servicePort: "http", + }, + }, + ], + }, + }, + ], + }, + }, + { + provider: args.provider, + }, + ); +} diff --git a/aws-ts-eks-migrate-nodegroups/iam.ts b/aws-ts-eks-migrate-nodegroups/iam.ts new file mode 100644 index 000000000..4d9f1d309 --- /dev/null +++ b/aws-ts-eks-migrate-nodegroups/iam.ts @@ -0,0 +1,51 @@ +import * as aws from "@pulumi/aws"; +import * as pulumi from "@pulumi/pulumi"; +import * as iam from "./iam"; + +const managedPolicyArns: string[] = [ + "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", + "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", + "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", +]; + +// Creates a role and attaches the EKS worker node IAM managed policies +export function createRole(name: string): aws.iam.Role { + const role = new aws.iam.Role(name, { + assumeRolePolicy: aws.iam.assumeRolePolicyForPrincipal({ + Service: "ec2.amazonaws.com", + }), + }); + + let counter = 0; + for (const policy of managedPolicyArns) { + // Create RolePolicyAttachment without returning it. 
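+ // Attaches each of the managed policies listed above (worker node, CNI,
+ // and ECR read-only) that EKS worker nodes require.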
+ const rpa = new aws.iam.RolePolicyAttachment(`${name}-policy-${counter++}`, + { policyArn: policy, role: role }, + ); + } + + return role; +} + +// Creates a collection of IAM roles. +export function createRoles(name: string, quantity: number): aws.iam.Role[] { + const roles: aws.iam.Role[] = []; + + for (let i = 0; i < quantity; i++) { + roles.push(iam.createRole(`${name}-role-${i}`)); + } + + return roles; +} + +// Creates a collection of IAM instance profiles from the given roles. +export function createInstanceProfiles(name: string, roles: aws.iam.Role[]): aws.iam.InstanceProfile[] { + const profiles: aws.iam.InstanceProfile[] = []; + + for (let i = 0; i < roles.length; i++) { + const role = roles[i]; + profiles.push(new aws.iam.InstanceProfile(`${name}-instanceProfile-${i}`, {role: role})); + } + + return profiles; +} diff --git a/aws-ts-eks-migrate-nodegroups/index.ts b/aws-ts-eks-migrate-nodegroups/index.ts new file mode 100644 index 000000000..8ad51de68 --- /dev/null +++ b/aws-ts-eks-migrate-nodegroups/index.ts @@ -0,0 +1,83 @@ +import * as awsx from "@pulumi/awsx"; import * as eks from "@pulumi/eks"; +import * as k8s from "@pulumi/kubernetes"; +import * as pulumi from "@pulumi/pulumi"; +import * as echoserver from "./echoserver"; +import * as iam from "./iam"; +import * as nginx from "./nginx"; +import * as utils from "./utils"; + +const projectName = pulumi.getProject(); + +// Allocate a new VPC with custom settings, and a public & private subnet per AZ. +const vpc = new awsx.ec2.Vpc(`${projectName}`, { + cidrBlock: "172.16.0.0/16", + subnets: [{ type: "public" }, { type: "private" }], +}); + +// Export VPC ID and Subnets. +export const vpcId = vpc.id; +export const allVpcSubnets = vpc.privateSubnetIds.concat(vpc.publicSubnetIds); + +// Create 3 IAM Roles and matching InstanceProfiles to use with the nodegroups. +const roles = iam.createRoles(projectName, 3); +const instanceProfiles = iam.createInstanceProfiles(projectName, roles); + +// Create an EKS cluster. +const myCluster = new eks.Cluster(`${projectName}`, { + version: "1.13", + vpcId: vpcId, + subnetIds: allVpcSubnets, + nodeAssociatePublicIpAddress: false, + skipDefaultNodeGroup: true, + deployDashboard: false, + instanceRoles: roles, + enabledClusterLogTypes: ["api", "audit", "authenticator", + "controllerManager", "scheduler"], +}); +export const kubeconfig = myCluster.kubeconfig; +export const clusterName = myCluster.core.cluster.name; + +// Create a Standard node group of t2.medium workers. +const ngStandard = utils.createNodeGroup(`${projectName}-ng-standard`, { + ami: "ami-03a55127c613349a7", // k8s v1.13.7 in us-west-2 + instanceType: "t2.medium", + desiredCapacity: 3, + cluster: myCluster, + instanceProfile: instanceProfiles[0], +}); + +// Create a 2xlarge node group of t3.2xlarge workers with taints on the nodes +// dedicated for the NGINX Ingress Controller. +const ng2xlarge = utils.createNodeGroup(`${projectName}-ng-2xlarge`, { + ami: "ami-0355c210cb3f58aa2", // k8s v1.12.7 in us-west-2 + instanceType: "t3.2xlarge", + desiredCapacity: 3, + cluster: myCluster, + instanceProfile: instanceProfiles[1], + taints: {"nginx": { value: "true", effect: "NoSchedule"}}, +}); + +// Create a Namespace for NGINX Ingress Controller and the echoserver workload. +const namespace = new k8s.core.v1.Namespace("apps", undefined, { provider: myCluster.provider }); +export const namespaceName = namespace.metadata.name; + +// Deploy the NGINX Ingress Controller on the specified node group. 
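+// The controller is pinned to the t3.2xlarge nodes via the nodeSelectorTermValues
+// instance-type affinity, and tolerates that node group's "nginx" NoSchedule
+// taint (see nginx.ts).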
+const image: string = "quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.25.0"; +const ingressClass: string = "my-nginx-class"; +const nginxService = nginx.create("nginx-ing-cntlr", { + image: image, + replicas: 3, + namespace: namespaceName, + ingressClass: ingressClass, + provider: myCluster.provider, + nodeSelectorTermValues: ["t3.2xlarge"], +}); +export const nginxServiceUrl = nginxService.status.loadBalancer.ingress[0].hostname; + +// Deploy the echoserver Workload on the Standard node group. +const echoserverDeployment = echoserver.create("echoserver", { + replicas: 3, + namespace: namespaceName, + ingressClass: ingressClass, + provider: myCluster.provider, +}); diff --git a/aws-ts-eks-migrate-nodegroups/nginx-ing-cntlr-rbac.ts b/aws-ts-eks-migrate-nodegroups/nginx-ing-cntlr-rbac.ts new file mode 100644 index 000000000..1078df440 --- /dev/null +++ b/aws-ts-eks-migrate-nodegroups/nginx-ing-cntlr-rbac.ts @@ -0,0 +1,199 @@ +import * as k8s from "@pulumi/kubernetes"; +import * as pulumi from "@pulumi/pulumi"; + +// Create a ServiceAccount. +interface NginxServiceAccountArgs { + namespace: pulumi.Input; + provider: k8s.Provider; +} +export function makeNginxServiceAccount( + name: string, + args: NginxServiceAccountArgs, +): k8s.core.v1.ServiceAccount { + return new k8s.core.v1.ServiceAccount( + name, + { + metadata: { + namespace: args.namespace, + }, + }, + { + provider: args.provider, + }, + ); +} + +// Create a ClusterRole. +interface NginxClusterRoleArgs { + provider: k8s.Provider; +} +export function makeNginxClusterRole( + name: string, + args: NginxClusterRoleArgs, +): k8s.rbac.v1.ClusterRole { + return new k8s.rbac.v1.ClusterRole( + name, + { + rules: [ + { + apiGroups: [""], + resources: ["configmaps", "endpoints", "nodes", "pods", "secrets"], + verbs: ["list", "watch"], + }, + { + apiGroups: [""], + resources: ["nodes"], + verbs: ["get"], + }, + { + apiGroups: [""], + resources: ["services"], + verbs: ["get", "list", "watch"], + }, + { + // TODO(metral): change to k8s.networking.v1beta.Ingress + // when EKS supports >= 1.14. + apiGroups: ["extensions"], + resources: ["ingresses"], + verbs: ["get", "list", "watch"], + }, + { + apiGroups: [""], + resources: ["events"], + verbs: ["create", "patch"], + }, + { + // TODO(metral): change to k8s.networking.v1beta.Ingress + // when EKS supports >= 1.14. + apiGroups: ["extensions"], + resources: ["ingresses/status"], + verbs: ["update"], + }, + ], + }, + { + provider: args.provider, + }, + ); +} + +// Create a ClusterRoleBinding of the ServiceAccount -> ClusterRole. +interface NginxClusterRoleBindingArgs { + namespace: pulumi.Input; + serviceAccountName: pulumi.Input; + clusterRoleName: pulumi.Input; + provider: k8s.Provider; +} +export function makeNginxClusterRoleBinding( + name: string, + args: NginxClusterRoleBindingArgs, +): k8s.rbac.v1.ClusterRoleBinding { + return new k8s.rbac.v1.ClusterRoleBinding( + name, + { + subjects: [ + { + kind: "ServiceAccount", + name: args.serviceAccountName, + namespace: args.namespace, + }, + ], + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "ClusterRole", + name: args.clusterRoleName, + }, + }, + { + provider: args.provider, + }, + ); +} + +// Create a Role. 
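+// The Role is namespaced: it grants the controller access to ConfigMaps,
+// endpoints, and its leader-election lock only within its own Namespace.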
+interface NginxRoleArgs { + namespace: pulumi.Input; + ingressClass: pulumi.Input; + provider: k8s.Provider; +} +export function makeNginxRole( + name: string, + args: NginxRoleArgs, +): k8s.rbac.v1.Role { + return new k8s.rbac.v1.Role( + name, + { + metadata: { + namespace: args.namespace, + }, + rules: [ + { + apiGroups: [""], + resources: ["configmaps", "pods", "secrets", "namespaces"], + verbs: ["get"], + }, + { + apiGroups: [""], + resources: ["configmaps"], + // Defaults to "-" + // In this setup its specifically: "-". + // This has to be adapted if you change either parameter + // (--election-id, and/or --ingress-class) when launching + // the nginx-ing-cntlr. See for more info: + // https://github.com/kubernetes/ingress/tree/master/docs/deploy/rbac.md#namespace-permissions + resourceNames: ["ingress-controller-leader-" + args.ingressClass], + verbs: ["get", "update"], + }, + { + apiGroups: [""], + resources: ["configmaps"], + verbs: ["create"], + }, + { + apiGroups: [""], + resources: ["endpoints"], + verbs: ["get", "create", "update"], + }, + ], + }, + { + provider: args.provider, + }, + ); +} + +// Create a RoleBinding of the ServiceAccount -> Role. +interface NginxRoleBindingArgs { + namespace: pulumi.Input; + serviceAccountName: pulumi.Input; + roleName: pulumi.Input; + provider: k8s.Provider; +} +export function makeNginxRoleBinding( + name: string, + args: NginxRoleBindingArgs, +): k8s.rbac.v1.RoleBinding { + return new k8s.rbac.v1.RoleBinding( + name, + { + metadata: { + namespace: args.namespace, + }, + subjects: [ + { + kind: "ServiceAccount", + name: args.serviceAccountName, + namespace: args.namespace, + }, + ], + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "Role", + name: args.roleName, + }, + }, + { + provider: args.provider, + }, + ); +} diff --git a/aws-ts-eks-migrate-nodegroups/nginx-ing-cntlr.ts b/aws-ts-eks-migrate-nodegroups/nginx-ing-cntlr.ts new file mode 100644 index 000000000..775bd4794 --- /dev/null +++ b/aws-ts-eks-migrate-nodegroups/nginx-ing-cntlr.ts @@ -0,0 +1,293 @@ +import * as k8s from "@pulumi/kubernetes"; +import * as input from "@pulumi/kubernetes/types/input"; +import * as pulumi from "@pulumi/pulumi"; +import * as rbac from "./nginx-ing-cntlr-rbac"; + +// Create the NGINX Ingress Controller ServiceAccount, RBAC, Configmap, Deployment, +// and Pod Disruption Budget. +interface NginxStackArgs { + replicas: pulumi.Input; + image: pulumi.Input; + labels: pulumi.Input; + namespace: pulumi.Input; + ingressClass: pulumi.Input; + affinity: input.core.v1.Affinity; + tolerations: input.core.v1.Toleration[]; + provider: k8s.Provider; +} +export function create( + name: string, + args: NginxStackArgs, +): k8s.apps.v1.Deployment { + + const defaultHttpBackendName = "nginx-default-http-backend"; + + // ServiceAccount. + const serviceAccount = rbac.makeNginxServiceAccount(name, { + namespace: args.namespace, + provider: args.provider, + }); + const serviceAccountName = serviceAccount.metadata.name; + + // RBAC ClusterRole. + const clusterRole = rbac.makeNginxClusterRole(name, { + provider: args.provider, + }); + const clusterRoleName = clusterRole.metadata.name; + const clusterRoleBinding = rbac.makeNginxClusterRoleBinding(name, { + namespace: args.namespace, + serviceAccountName: serviceAccountName, + clusterRoleName: clusterRoleName, + provider: args.provider, + }); + + // RBAC Role. 
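+ // Bind the ServiceAccount to a namespaced Role for leader-election state
+ // and endpoint updates in the app Namespace.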
+ const role = rbac.makeNginxRole(name, { + namespace: args.namespace, + ingressClass: args.ingressClass, + provider: args.provider, + }); + const roleName = role.metadata.name; + const roleBinding = rbac.makeNginxRoleBinding(name, { + namespace: args.namespace, + serviceAccountName: serviceAccountName, + roleName: roleName, + provider: args.provider, + }); + + // NGINX Settings ConfigMap. + const configMap = makeConfigMap(name, { + labels: args.labels, + namespace: args.namespace, + provider: args.provider, + }); + const configMapName = configMap.metadata.name; + + // Assemble the resources. + // Per: https://itnext.io/kubernetes-ingress-controllers-how-to-choose-the-right-one-part-1-41d3554978d2 + const resources: input.core.v1.ResourceRequirements = { + requests: {memory: "1Gi"}, + limits: {memory: "2Gi"}, + }; + + // Create the Deployment. + const deployment = makeDeployment(name, { + replicas: args.replicas, + image: args.image, + labels: args.labels, + namespace: args.namespace, + resources: resources, + ingressClass: args.ingressClass, + serviceAccountName: serviceAccountName, + configMapName: configMapName, + affinity: args.affinity, + tolerations: args.tolerations, + provider: args.provider, + }); + + // Create the PodDisruptionBudget with a minimum availability of 2 pods. + const pdb = makePodDisruptionBudget(name, { + minAvailable: 2, + labels: args.labels, + namespace: args.namespace, + provider: args.provider, + }); + + return deployment; +} + +// Create a ConfigMap for the NGINX Ingress Controller settings. +interface NginxConfigMapArgs { + labels: pulumi.Input; + namespace: pulumi.Input; + provider: k8s.Provider; +} +export function makeConfigMap( + name: string, + args: NginxConfigMapArgs, +): k8s.core.v1.ConfigMap { + return new k8s.core.v1.ConfigMap( + name, + { + metadata: { + labels: args.labels, + namespace: args.namespace, + }, + data: { + "keep-alive": "200", // https://www.nginx.com/blog/testing-performance-nginx-ingress-controller-kubernetes/ + "keep-alive-requests": "10000", + "proxy-connect-timeout": "10", // https://git.io/fjwCj + "proxy-read-timeout": "120", // https://git.io/fjwCj + "proxy-send-timeout": "120", // https://git.io/fjwCj + "proxy-next-upstream": "error timeout http_502 http_503 http_504", // https://git.io/fjwWe + "upstream-keepalive-connections": "128", + "upstream-keepalive-timeout": "315", // https://www.nginx.com/blog/testing-performance-nginx-ingress-controller-kubernetes/ + "upstream-keepalive-requests": "1000000", // https://www.nginx.com/blog/testing-performance-nginx-ingress-controller-kubernetes/ + "worker-processes": "8", // https://itnext.io/kubernetes-ingress-controllers-how-to-choose-the-right-one-part-1-41d3554978d2 + "worker-shutdown-timeout": "60s", + }, + }, + { + provider: args.provider, + }, + ); +} + +// Create the Deployment. 
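+// The Deployment below rolls out with maxUnavailable: 0, runs health probes on
+// :10254/healthz, and uses a preStop sleep so in-flight connections can drain
+// before a Pod is replaced.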
+interface NginxDeploymentArgs { + replicas: pulumi.Input; + image: pulumi.Input; + labels: pulumi.Input; + namespace: pulumi.Input; + resources: pulumi.Input; + ingressClass: pulumi.Input; + affinity: input.core.v1.Affinity; + tolerations: input.core.v1.Toleration[]; + serviceAccountName: pulumi.Input; + configMapName: pulumi.Input; + provider: k8s.Provider; +} +export function makeDeployment( + name: string, + args: NginxDeploymentArgs, +): k8s.apps.v1.Deployment { + // Run as www-data user / id 33 + const wwwDataUser: number = 33; + return new k8s.apps.v1.Deployment(name, + { + metadata: { + labels: args.labels, + annotations: { + "prometheus.io/port": "10254", + "prometheus.io/scrape": "true", + }, + namespace: args.namespace, + }, + spec: { + strategy: { + type: "RollingUpdate", + rollingUpdate: { maxSurge: 1, maxUnavailable: 0 }, + }, + replicas: args.replicas, + selector: { matchLabels: args.labels }, + template: { + metadata: { labels: args.labels, namespace: args.namespace }, + spec: { + serviceAccountName: args.serviceAccountName, + terminationGracePeriodSeconds: 120, + affinity: args.affinity, + tolerations: args.tolerations, + containers: [ + { + name: name, + image: args.image, + resources: args.resources, + ports: [{ name: "http", containerPort: 80 }], + securityContext: { + allowPrivilegeEscalation: true, + capabilities: { + drop: ["ALL"], + add: ["NET_BIND_SERVICE"], + }, + runAsUser: wwwDataUser, + }, + env: [ + { + name: "POD_NAME", + valueFrom: { + fieldRef: { + fieldPath: "metadata.name", + }, + }, + }, + { + name: "POD_NAMESPACE", + valueFrom: { + fieldRef: { + fieldPath: "metadata.namespace", + }, + }, + }, + ], + readinessProbe: { + httpGet: { + path: "/healthz", + port: 10254, + scheme: "HTTP", + }, + timeoutSeconds: 10, + periodSeconds: 10, + successThreshold: 1, + failureThreshold: 3, + }, + livenessProbe: { + httpGet: { + path: "/healthz", + port: 10254, + scheme: "HTTP", + }, + initialDelaySeconds: 10, + timeoutSeconds: 10, + periodSeconds: 10, + successThreshold: 1, + failureThreshold: 3, + }, + lifecycle: { + preStop: { + exec: { + command: ["sleep", "20"], + }, + }, + }, + // For more info on all CLI args available: + // https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/cli-arguments.md + args: pulumi.all([ + "/nginx-ingress-controller", + pulumi.concat("--configmap=$(POD_NAMESPACE)/", args.configMapName), + // NGINX service name is fixed vs auto-named & ref'd in order for + // nginx-ing-cntlr arg --publish-service to work. + pulumi.concat("--publish-service=$(POD_NAMESPACE)/", name), + "--annotations-prefix=nginx.ingress.kubernetes.io", + "--ingress-class=" + args.ingressClass, + "--v=2", + ]), + }, + ], + }, + }, + }, + }, + { + provider: args.provider, + }, + ); +} + +// Create a PodDisruptionBudget for the Deployment Pods. 
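+// create() sets minAvailable to 2, so node drains during the node group
+// migration always leave enough controller Pods running to serve traffic.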
+interface PodDisruptionBudgetArgs {
+ minAvailable: pulumi.Input<number>;
+ labels: pulumi.Input<any>;
+ namespace: pulumi.Input<string>;
+ provider: k8s.Provider;
+}
+export function makePodDisruptionBudget(
+ name: string,
+ args: PodDisruptionBudgetArgs,
+): k8s.policy.v1beta1.PodDisruptionBudget {
+ return new k8s.policy.v1beta1.PodDisruptionBudget(
+ name,
+ {
+ metadata: {
+ labels: args.labels,
+ namespace: args.namespace,
+ },
+ spec: {
+ minAvailable: args.minAvailable,
+ selector: { matchLabels: args.labels },
+ },
+ },
+ {
+ provider: args.provider,
+ },
+ );
+}
diff --git a/aws-ts-eks-migrate-nodegroups/nginx.ts b/aws-ts-eks-migrate-nodegroups/nginx.ts
new file mode 100644
index 000000000..6918dd87a
--- /dev/null
+++ b/aws-ts-eks-migrate-nodegroups/nginx.ts
@@ -0,0 +1,128 @@
+import * as eks from "@pulumi/eks";
+import * as k8s from "@pulumi/kubernetes";
+import * as input from "@pulumi/kubernetes/types/input";
+import * as pulumi from "@pulumi/pulumi";
+import * as nginxIngCntlr from "./nginx-ing-cntlr";
+
+// Creates the NGINX Ingress Controller.
+interface NginxArgs {
+ image: pulumi.Input<string>;
+ replicas: pulumi.Input<number>;
+ namespace: pulumi.Input<string>;
+ ingressClass: string;
+ provider: k8s.Provider;
+ nodeSelectorTermValues: pulumi.Input<string>[];
+}
+export function create(
+ name: string,
+ args: NginxArgs,
+): k8s.core.v1.Service {
+ // Define the Node affinity to target for the NGINX Deployment.
+ const affinity: input.core.v1.Affinity = {
+ // Target the Pods to run on nodes that match the labels for the node
+ // selector.
+ nodeAffinity: {
+ requiredDuringSchedulingIgnoredDuringExecution: {
+ nodeSelectorTerms: [
+ {
+ matchExpressions: [
+ {
+ key: "beta.kubernetes.io/instance-type",
+ operator: "In",
+ values: args.nodeSelectorTermValues,
+ },
+ ],
+ },
+ ],
+ },
+ },
+ // Don't co-locate running Pods with matching labels on the same node,
+ // and spread them per the node hostname.
+ podAntiAffinity: {
+ requiredDuringSchedulingIgnoredDuringExecution: [
+ {
+ topologyKey: "kubernetes.io/hostname",
+ labelSelector: {
+ matchExpressions: [
+ {
+ key: "app",
+ operator: "In",
+ values: [ name ],
+ },
+ ],
+ },
+ },
+ ],
+ },
+ };
+
+ // Define the Pod tolerations of the tainted Nodes to target.
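+ // These must match the "nginx" NoSchedule taint placed on the dedicated
+ // t3.2xlarge node group in index.ts.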
+ const tolerations: input.core.v1.Toleration[] = [ + { + key: "nginx", + value: "true", + effect: "NoSchedule", + }, + ]; + + const deployment = nginxIngCntlr.create(name, { + replicas: args.replicas, + image: args.image, + labels: {app: name}, + namespace: args.namespace, + ingressClass: args.ingressClass, + affinity: affinity, + tolerations: tolerations, + provider: args.provider, + }); + + const service = createService(name, { + labels: { app: name }, + namespace: args.namespace, + provider: args.provider, + }); + + return service; +} + +// Create the LoadBalancer Service to front the NGINX Ingress Controller, +interface NginxServiceArgs { + labels: pulumi.Input; + namespace: pulumi.Input; + provider: k8s.Provider; +} +export function createService( + name: string, + args: NginxServiceArgs, +): k8s.core.v1.Service { + const ENABLE_DRAINING: pulumi.Input<{[key: string]: pulumi.Input}> = { + "service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled": "true", + }; + const ENABLE_DRAINING_TIMEOUT: pulumi.Input<{[key: string]: pulumi.Input}> = { + "service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout": "60", + }; + return new k8s.core.v1.Service( + name, + { + metadata: { + // NGINX service name is fixed vs auto-named & ref'd in order for + // nginx-ing-cntlr arg --publish-service to work. + name: name, + labels: args.labels, + namespace: args.namespace, + annotations: { + ...ENABLE_DRAINING, + ...ENABLE_DRAINING_TIMEOUT, + }, + }, + spec: { + type: "LoadBalancer", + ports: [{port: 80, protocol: "TCP", targetPort: "http"}], + selector: args.labels, + }, + }, + { + provider: args.provider, + }, + ); +} diff --git a/aws-ts-eks-migrate-nodegroups/package.json b/aws-ts-eks-migrate-nodegroups/package.json new file mode 100644 index 000000000..f6735ff40 --- /dev/null +++ b/aws-ts-eks-migrate-nodegroups/package.json @@ -0,0 +1,14 @@ +{ + "name": "aws-ts-eks-migrate-nodegroups", + "devDependencies": { + "typescript": "^3.0.0", + "@types/node": "latest" + }, + "dependencies": { + "@pulumi/aws": "latest", + "@pulumi/awsx": "latest", + "@pulumi/kubernetes": "latest", + "@pulumi/pulumi": "latest", + "@pulumi/eks": "latest" + } +} diff --git a/aws-ts-eks-migrate-nodegroups/scripts/delete-t3.2xlarge-nodes.sh b/aws-ts-eks-migrate-nodegroups/scripts/delete-t3.2xlarge-nodes.sh new file mode 100755 index 000000000..9040954c3 --- /dev/null +++ b/aws-ts-eks-migrate-nodegroups/scripts/delete-t3.2xlarge-nodes.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +for node in $(kubectl get nodes -l beta.kubernetes.io/instance-type=t3.2xlarge -o=name); do + echo "On node: $node" + kubectl delete "$node"; +done diff --git a/aws-ts-eks-migrate-nodegroups/scripts/drain-t3.2xlarge-nodes.sh b/aws-ts-eks-migrate-nodegroups/scripts/drain-t3.2xlarge-nodes.sh new file mode 100755 index 000000000..74db92180 --- /dev/null +++ b/aws-ts-eks-migrate-nodegroups/scripts/drain-t3.2xlarge-nodes.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +for node in $(kubectl get nodes -l beta.kubernetes.io/instance-type=t3.2xlarge -o=name); do + echo "On node: $node" + kubectl drain --force --ignore-daemonsets --delete-local-data --grace-period=10 "$node"; +done diff --git a/aws-ts-eks-migrate-nodegroups/scripts/load-testing.sh b/aws-ts-eks-migrate-nodegroups/scripts/load-testing.sh new file mode 100755 index 000000000..83844d4fb --- /dev/null +++ b/aws-ts-eks-migrate-nodegroups/scripts/load-testing.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +EXPECTEDARGS=2 +if [ $# -lt $EXPECTEDARGS ]; then + echo "Usage: $0 <(optional): NUM_OF_TOTAL_REQUESTS> 
<(optional): NUM_OF_CONCURRENT_REQUESTS>"
+ exit 0
+fi
+
+LB=$1
+LOOPS=$2
+REQS=${3:-50000}
+CONCURRENCY=${4:-100}
+
+for ((i=0;i<$LOOPS;i++))
+do
+ echo "==================================="
+ echo `date`
+ echo "loop #: $(($i+1)) of $LOOPS"
+ hey -n $REQS -c $CONCURRENCY -host "apps.example.com" http://$LB/echoserver
+ echo "-------------------"
+done
diff --git a/aws-ts-eks-migrate-nodegroups/tsconfig.json b/aws-ts-eks-migrate-nodegroups/tsconfig.json
new file mode 100644
index 000000000..2ea71672d
--- /dev/null
+++ b/aws-ts-eks-migrate-nodegroups/tsconfig.json
@@ -0,0 +1,24 @@
+{
+ "compilerOptions": {
+ "outDir": "bin",
+ "target": "es6",
+ "lib": [
+ "es6"
+ ],
+ "module": "commonjs",
+ "moduleResolution": "node",
+ "declaration": true,
+ "sourceMap": true,
+ "stripInternal": true,
+ "experimentalDecorators": true,
+ "pretty": true,
+ "noFallthroughCasesInSwitch": true,
+ "noImplicitAny": true,
+ "noImplicitReturns": true,
+ "forceConsistentCasingInFileNames": true,
+ "strictNullChecks": true
+ },
+ "files": [
+ "index.ts"
+ ]
+}
diff --git a/aws-ts-eks-migrate-nodegroups/utils.ts b/aws-ts-eks-migrate-nodegroups/utils.ts
new file mode 100644
index 000000000..184dc00cc
--- /dev/null
+++ b/aws-ts-eks-migrate-nodegroups/utils.ts
@@ -0,0 +1,34 @@
+import * as aws from "@pulumi/aws";
+import * as eks from "@pulumi/eks";
+import * as pulumi from "@pulumi/pulumi";
+
+// Creates an EKS NodeGroup.
+interface NodeGroupArgs {
+ ami: string;
+ instanceType: pulumi.Input<aws.ec2.InstanceType>;
+ desiredCapacity: pulumi.Input<number>;
+ cluster: eks.Cluster;
+ instanceProfile: aws.iam.InstanceProfile;
+ taints?: pulumi.Input<any>;
+}
+export function createNodeGroup(
+ name: string,
+ args: NodeGroupArgs,
+): eks.NodeGroup {
+ return new eks.NodeGroup(name, {
+ cluster: args.cluster,
+ nodeSecurityGroup: args.cluster.nodeSecurityGroup,
+ clusterIngressRule: args.cluster.eksClusterIngressRule,
+ instanceType: args.instanceType,
+ amiId: args.ami,
+ nodeAssociatePublicIpAddress: false,
+ desiredCapacity: args.desiredCapacity,
+ minSize: args.desiredCapacity,
+ maxSize: 10,
+ instanceProfile: args.instanceProfile,
+ labels: {"amiId": args.ami},
+ taints: args.taints,
+ }, {
+ providers: { kubernetes: args.cluster.provider},
+ });
+}
diff --git a/misc/test/examples_test.go b/misc/test/examples_test.go
index 7169b42b9..25a841c51 100644
--- a/misc/test/examples_test.go
+++ b/misc/test/examples_test.go
@@ -210,6 +210,20 @@ func TestExamples(t *testing.T) {
 })
 },
 }),
+ base.With(integration.ProgramTestOptions{
+ Dir: path.Join(cwd, "..", "..", "aws-ts-eks-migrate-nodegroups"),
+ Config: map[string]string{
+ "aws:region": awsRegion,
+ },
+ ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
+ maxWait := 10 * time.Minute
+ endpoint := fmt.Sprintf("%s/echoserver", stack.Outputs["nginxServiceUrl"].(string))
+ headers := map[string]string{"Host": "apps.example.com"}
+ assertHTTPResultWithRetry(t, endpoint, headers, maxWait, func(body string) bool {
+ return assert.NotEmpty(t, body, "Body should not be empty")
+ })
+ },
+ }),
 base.With(integration.ProgramTestOptions{
 Dir: path.Join(cwd, "..", "..", "aws-ts-hello-fargate"),
 Config: map[string]string{