Skip to content

Commit

Permalink
migrating(eks): add example of migrating node groups with zero downtime
Browse files Browse the repository at this point in the history
  • Loading branch information
metral committed Jul 22, 2019
1 parent 39aec82 commit d1f81f2
Show file tree
Hide file tree
Showing 16 changed files with 1,048 additions and 6 deletions.
13 changes: 7 additions & 6 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -14,10 +14,6 @@ before_install:
- source ${GOPATH}/src/github.com/pulumi/scripts/ci/prepare-environment.sh
- source ${PULUMI_SCRIPTS}/ci/keep-failed-tests.sh
- sudo apt-get update && sudo apt-get install -y apt-transport-https
- curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
- echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list
- sudo apt-get update
- sudo apt-get install -y kubectl
install:
- source ${PULUMI_SCRIPTS}/ci/install-common-toolchain.sh
- curl -L https://get.pulumi.com/ | bash
Expand All @@ -32,10 +28,15 @@ install:
| bash
- helm init -c
- helm repo add bitnami https://charts.bitnami.com/bitnami
# Install aws-iam-authenticator
# See: https://docs.aws.amazon.com/eks/latest/userguide/install-aws-iam-authenticator.html
- curl -o aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.13.7/2019-06-11/bin/linux/amd64/aws-iam-authenticator
- chmod +x ./aws-iam-authenticator
- mkdir -p $HOME/bin && cp ./aws-iam-authenticator $HOME/bin/aws-iam-authenticator
-
- sudo mv aws-iam-authenticator /usr/local/bin
# Install kubectl
- curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
- chmod +x ./kubectl
- sudo mv kubectl /usr/local/bin
before_script:
- "${PULUMI_SCRIPTS}/ci/ensure-dependencies"
after_failure:
Expand Down
3 changes: 3 additions & 0 deletions aws-ts-eks-migrate-nodegroups/Pulumi.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
name: aws-ts-eks-migrate-nodegroups
description: Creates an EKS cluster with node groups and a workload. Then covers how to add an additional node group, and use it to migrate the workload with zero downtime.
runtime: nodejs
4 changes: 4 additions & 0 deletions aws-ts-eks-migrate-nodegroups/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# examples/aws-ts-eks-migrate-nodegroups

Creates an EKS cluster with node groups and a workload, and showcases how to add an
additional node group to use for workload migration with zero downtime.
161 changes: 161 additions & 0 deletions aws-ts-eks-migrate-nodegroups/echoserver.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,161 @@
import * as eks from "@pulumi/eks";
import * as k8s from "@pulumi/kubernetes";
import * as pulumi from "@pulumi/pulumi";

// Arguments for the echoserver workload: Service, Deployment and Ingress.
interface EchoserverArgs {
    replicas: pulumi.Input<number>;
    namespace: pulumi.Input<string>;
    ingressClass: pulumi.Input<string>;
    provider: k8s.Provider;
}

/**
 * Create the echoserver workload: a ClusterIP Service, a Deployment of
 * echoserver pods, and an Ingress routing /echoserver to the Service.
 *
 * @param name Base name applied to all created resources.
 * @param args Replica count, target namespace, ingress class, and the
 *             Kubernetes provider to create the resources with.
 * @returns The created Service. The Deployment and Ingress are registered
 *          with the Pulumi engine on construction, so their references do
 *          not need to be retained here.
 */
export function create(
    name: string,
    args: EchoserverArgs,
): k8s.core.v1.Service {
    const labels = {app: name};

    // Create the Service fronting the echoserver pods.
    const service = createService(name, {
        labels: labels,
        namespace: args.namespace,
        provider: args.provider,
    });
    const serviceName = service.metadata.name;

    // Create the Deployment; registered with the engine for its side effect,
    // so the return value is intentionally not bound.
    createDeployment(name, {
        replicas: args.replicas,
        labels: labels,
        namespace: args.namespace,
        provider: args.provider,
    });

    // Create the Ingress routing external traffic to the Service above.
    createIngress(name, {
        labels: labels,
        namespace: args.namespace,
        ingressClass: args.ingressClass,
        serviceName: serviceName,
        provider: args.provider,
    });

    return service;
}

// Arguments for the echoserver Service.
interface EchoserverServiceArgs {
    labels: pulumi.Input<any>;
    namespace: pulumi.Input<string>;
    provider: k8s.Provider;
}

/**
 * Build a ClusterIP Service that accepts TCP traffic on port 80 and
 * forwards it to the pods' named "http" container port, selecting pods
 * by the supplied labels.
 */
export function createService(
    name: string,
    args: EchoserverServiceArgs,
): k8s.core.v1.Service {
    const metadata = {
        labels: args.labels,
        namespace: args.namespace,
    };
    const spec = {
        type: "ClusterIP",
        ports: [{port: 80, protocol: "TCP", targetPort: "http"}],
        selector: args.labels,
    };
    return new k8s.core.v1.Service(
        name,
        { metadata: metadata, spec: spec },
        { provider: args.provider },
    );
}

// Arguments for the echoserver Deployment.
interface EchoserverDeploymentArgs {
    replicas: pulumi.Input<number>;
    labels: pulumi.Input<any>;
    namespace: pulumi.Input<string>;
    provider: k8s.Provider;
}

/**
 * Build a Deployment of echoserver pods. Each pod runs a single container
 * exposing containerPort 8080 under the port name "http", which the
 * companion Service targets by name.
 */
export function createDeployment(
    name: string,
    args: EchoserverDeploymentArgs,
): k8s.apps.v1.Deployment {
    // Pod template spec: one always-restarting echoserver container.
    const podSpec = {
        restartPolicy: "Always",
        containers: [
            {
                name: name,
                image: "gcr.io/google-containers/echoserver:1.5",
                ports: [{ name: "http", containerPort: 8080 }],
            },
        ],
    };
    const podMetadata = { labels: args.labels, namespace: args.namespace };
    return new k8s.apps.v1.Deployment(
        name,
        {
            metadata: { labels: args.labels, namespace: args.namespace },
            spec: {
                replicas: args.replicas,
                selector: { matchLabels: args.labels },
                template: { metadata: podMetadata, spec: podSpec },
            },
        },
        { provider: args.provider },
    );
}

// Arguments for the echoserver Ingress.
interface EchoserverIngressArgs {
    labels: pulumi.Input<any>;
    namespace: pulumi.Input<string>;
    ingressClass: pulumi.Input<string>;
    serviceName: pulumi.Input<string>;
    provider: k8s.Provider;
}

/**
 * Build an Ingress that routes apps.example.com/echoserver to the given
 * Service's "http" port, annotated so the desired ingress controller class
 * picks it up.
 */
export function createIngress(
    name: string,
    args: EchoserverIngressArgs,
): k8s.extensions.v1beta1.Ingress {
    // TODO(metral): change to k8s.networking.v1beta.Ingress
    // when EKS supports >= 1.14.
    const echoserverPath = {
        path: "/echoserver",
        backend: {
            serviceName: args.serviceName,
            servicePort: "http",
        },
    };
    const rule = {
        host: "apps.example.com",
        http: { paths: [echoserverPath] },
    };
    return new k8s.extensions.v1beta1.Ingress(
        name,
        {
            metadata: {
                labels: args.labels,
                namespace: args.namespace,
                annotations: {
                    "kubernetes.io/ingress.class": args.ingressClass,
                },
            },
            spec: { rules: [rule] },
        },
        { provider: args.provider },
    );
}
51 changes: 51 additions & 0 deletions aws-ts-eks-migrate-nodegroups/iam.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
import * as aws from "@pulumi/aws";
import * as pulumi from "@pulumi/pulumi";
import * as iam from "./iam";

// AWS managed policies attached to every worker node role: EKS node join,
// VPC CNI pod networking, and read-only ECR image pulls.
const managedPolicyArns: string[] = [
    "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
    "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
    "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
];

/**
 * Creates an IAM role assumable by EC2 and attaches the EKS worker node
 * managed policies to it.
 *
 * The RolePolicyAttachment resources are registered with the Pulumi engine
 * on construction (named `${name}-policy-<i>`), so only the Role itself is
 * returned.
 */
export function createRole(name: string): aws.iam.Role {
    const role = new aws.iam.Role(name, {
        assumeRolePolicy: aws.iam.assumeRolePolicyForPrincipal({
            Service: "ec2.amazonaws.com",
        }),
    });

    // Attach each managed policy; the attachment is created purely for its
    // side effect, so no binding is kept (the original bound an unused `rpa`
    // and tracked the index with a mutable counter).
    managedPolicyArns.forEach((policy, i) => {
        new aws.iam.RolePolicyAttachment(`${name}-policy-${i}`, {
            policyArn: policy,
            role: role,
        });
    });

    return role;
}

// Creates a collection of IAM roles.
export function createRoles(name: string, quantity: number): aws.iam.Role[] {
const roles: aws.iam.Role[] = [];

for (let i = 0; i < quantity; i++) {
roles.push(iam.createRole(`${name}-role-${i}`));
}

return roles;
}

// Creates one IAM instance profile per supplied role, named
// `${name}-instanceProfile-<i>` in role order.
export function createInstanceProfiles(name: string, roles: aws.iam.Role[]): aws.iam.InstanceProfile[] {
    return roles.map((role, i) =>
        new aws.iam.InstanceProfile(`${name}-instanceProfile-${i}`, {role: role}));
}
83 changes: 83 additions & 0 deletions aws-ts-eks-migrate-nodegroups/index.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
import * as awsx from "@pulumi/awsx"; import * as eks from "@pulumi/eks";
import * as k8s from "@pulumi/kubernetes";
import * as pulumi from "@pulumi/pulumi";
import * as echoserver from "./echoserver";
import * as iam from "./iam";
import * as nginx from "./nginx";
import * as utils from "./utils";

// Example program: an EKS cluster with two node groups and a workload,
// set up so node groups can be added and workloads migrated between them
// with zero downtime.
const projectName = pulumi.getProject();

// Allocate a new VPC with custom settings, and a public & private subnet per AZ.
const vpc = new awsx.ec2.Vpc(`${projectName}`, {
    cidrBlock: "172.16.0.0/16",
    subnets: [{ type: "public" }, { type: "private" }],
});

// Export VPC ID and Subnets.
export const vpcId = vpc.id;
export const allVpcSubnets = vpc.privateSubnetIds.concat(vpc.publicSubnetIds);

// Create 3 IAM Roles and matching InstanceProfiles to use with the nodegroups.
// NOTE(review): only instanceProfiles[0] and [1] are used below; the third
// pair appears reserved for a node group added during migration — confirm.
const roles = iam.createRoles(projectName, 3);
const instanceProfiles = iam.createInstanceProfiles(projectName, roles);

// Create an EKS cluster with no default node group; the node groups are
// created explicitly below so each can be managed (and later retired)
// independently. All control-plane log types are enabled.
const myCluster = new eks.Cluster(`${projectName}`, {
    version: "1.13",
    vpcId: vpcId,
    subnetIds: allVpcSubnets,
    nodeAssociatePublicIpAddress: false,
    skipDefaultNodeGroup: true,
    deployDashboard: false,
    instanceRoles: roles,
    enabledClusterLogTypes: ["api", "audit", "authenticator",
        "controllerManager", "scheduler"],
});
export const kubeconfig = myCluster.kubeconfig;
export const clusterName = myCluster.core.cluster.name;

// Create a Standard node group of t2.medium workers.
// The binding is kept for readability even though it is not referenced.
const ngStandard = utils.createNodeGroup(`${projectName}-ng-standard`, {
    ami: "ami-03a55127c613349a7", // k8s v1.13.7 in us-west-2
    instanceType: "t2.medium",
    desiredCapacity: 3,
    cluster: myCluster,
    instanceProfile: instanceProfiles[0],
});

// Create a 2xlarge node group of t3.2xlarge workers with taints on the nodes
// dedicated for the NGINX Ingress Controller.
// NOTE(review): this AMI is k8s v1.12.7 while the cluster is 1.13 —
// presumably deliberate as the migration-source group; confirm.
const ng2xlarge = utils.createNodeGroup(`${projectName}-ng-2xlarge`, {
    ami: "ami-0355c210cb3f58aa2", // k8s v1.12.7 in us-west-2
    instanceType: "t3.2xlarge",
    desiredCapacity: 3,
    cluster: myCluster,
    instanceProfile: instanceProfiles[1],
    taints: {"nginx": { value: "true", effect: "NoSchedule"}},
});

// Create a Namespace for NGINX Ingress Controller and the echoserver workload.
const namespace = new k8s.core.v1.Namespace("apps", undefined, { provider: myCluster.provider });
export const namespaceName = namespace.metadata.name;

// Deploy the NGINX Ingress Controller on the specified node group, selected
// by instance type to land on the tainted t3.2xlarge workers.
const image: string = "quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.25.0";
const ingressClass: string = "my-nginx-class";
const nginxService = nginx.create("nginx-ing-cntlr", {
    image: image,
    replicas: 3,
    namespace: namespaceName,
    ingressClass: ingressClass,
    provider: myCluster.provider,
    nodeSelectorTermValues: ["t3.2xlarge"],
});
export const nginxServiceUrl = nginxService.status.loadBalancer.ingress[0].hostname;

// Deploy the echoserver Workload on the Standard node group, fronted by the
// ingress class served by the controller above.
const echoserverDeployment = echoserver.create("echoserver", {
    replicas: 3,
    namespace: namespaceName,
    ingressClass: ingressClass,
    provider: myCluster.provider,
});
Loading

0 comments on commit d1f81f2

Please sign in to comment.