From 4020ecfa58c6c381f9487c735cf96d0035f75a30 Mon Sep 17 00:00:00 2001
From: Pat Gavlin
Date: Wed, 15 Aug 2018 17:53:46 -0700
Subject: [PATCH 1/4] Use the EKS package.

Just what it says on the tin. These changes also eliminate the
instanceType config var.

---
 aws-ts-eks/cluster.ts     | 457 --------------------------------------
 aws-ts-eks/dashboard.ts   | 315 --------------------------
 aws-ts-eks/index.ts       |  15 +-
 aws-ts-eks/package.json   |   4 +-
 aws-ts-eks/serviceRole.ts |  76 -------
 5 files changed, 6 insertions(+), 861 deletions(-)
 delete mode 100644 aws-ts-eks/cluster.ts
 delete mode 100644 aws-ts-eks/dashboard.ts
 delete mode 100644 aws-ts-eks/serviceRole.ts

diff --git a/aws-ts-eks/cluster.ts b/aws-ts-eks/cluster.ts
deleted file mode 100644
index db304ffb6..000000000
--- a/aws-ts-eks/cluster.ts
+++ /dev/null
@@ -1,457 +0,0 @@
-import * as pulumi from "@pulumi/pulumi";
-import * as aws from "@pulumi/aws";
-import * as k8s from "@pulumi/kubernetes";
-import * as k8sInputs from "@pulumi/kubernetes/types/input";
-
-import { Dashboard } from "./dashboard";
-import { ServiceRole } from "./serviceRole";
-
-/**
- * EBSVolumeType lists the set of volume types accepted by an EKS storage class.
- */
-export type EBSVolumeType = "io1" | "gp2" | "sc1" | "st1";
-
-/**
- * EKSStorageClass describes the inputs to a single Kubernetes StorageClass provisioned by AWS. Any number of storage
- * classes can be added to a cluster at creation time. One of these storage classes may be configured the default
- * storage class for the cluster.
- */
-export interface EKSStorageClass {
-    /**
-     * The EBS volume type.
-     */
-    type: pulumi.Input;
-
-    /**
-     * The AWS zone or zones for the EBS volume. If zones is not specified, volumes are generally round-robin-ed across
-     * all active zones where Kubernetes cluster has a node. zone and zones parameters must not be used at the same
-     * time.
-     */
-    zones?: pulumi.Input[]>;
-
-    /**
-     * I/O operations per second per GiB for "io1" volumes. The AWS volume plugin multiplies this with the size of a
-     * requested volume to compute IOPS of the volume and caps the result at 20,000 IOPS.
-     */
-    iopsPerGb?: pulumi.Input;
-
-    /**
-     * Denotes whether the EBS volume should be encrypted.
-     */
-    encrypted?: pulumi.Input;
-
-    /**
-     * The full Amazon Resource Name of the key to use when encrypting the volume. If none is supplied but encrypted is
-     * true, a key is generated by AWS.
-     */
-    kmsKeyId?: pulumi.Input;
-
-    /**
-     * True if this storage class should be the default storage class for the cluster.
-     */
-    default?: pulumi.Input;
-
-    /**
-     * AllowVolumeExpansion shows whether the storage class allow volume expand
-     */
-    allowVolumeExpansion?: pulumi.Input
-
-    /**
-     * Standard object's metadata. More info:
-     * https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
-     */
-    metadata?: pulumi.Input
-
-    /**
-     * Dynamically provisioned PersistentVolumes of this storage class are created with these
-     * mountOptions, e.g. ["ro", "soft"]. Not validated - mount of the PVs will simply fail if one
-     * is invalid.
-     */
-    mountOptions?: pulumi.Input
-
-    /**
-     * Dynamically provisioned PersistentVolumes of this storage class are created with this
-     * reclaimPolicy. Defaults to Delete.
-     */
-    reclaimPolicy?: pulumi.Input
-
-    /**
-     * VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound.
-     * When unset, VolumeBindingImmediate is used. This field is alpha-level and is only honored
-     * by servers that enable the VolumeScheduling feature.
- */ - volumeBindingMode?: pulumi.Input -} - -/** - * EKSClusterOptions describes the configuration options accepted by an EKSCluster component. - */ -export interface EKSClusterOptions { - /** - * The VPC in which to create the cluster and its worker nodes. - */ - vpcId: pulumi.Input; - - /** - * The subnets to attach to the EKS cluster. - */ - subnetIds: pulumi.Input[]>; - - /** - * The instance type to use for the cluster's nodes. - */ - instanceType: pulumi.Input; - - /** - * The number of worker nodes that should be running in the cluster. - */ - desiredCapacity: pulumi.Input; - - /** - * The minimum number of worker nodes running in the cluster. - */ - minSize: pulumi.Input; - - /** - * The maximum number of worker nodes running in the cluster. - */ - maxSize: pulumi.Input; - - /** - * An optional set of StorageClasses to enable for the cluster. If this is a single volume type rather than a map, - * a single StorageClass will be created for that volume type and made the cluster's default StorageClass. - */ - storageClasses?: { [name: string]: EKSStorageClass } | EBSVolumeType; - - /** - * Whether or not to deploy the Kubernetes dashboard to the cluster. If the dashboard is deployed, it can be - * accessed as follows: - * 1. Retrieve an authentication token for the dashboard by running the following and copying the value of `token` - * from the output of the last command: - * - * $ kubectl -n kube-system get secret | grep eks-admin | awk '{print $1}' - * $ kubectl -n kube-system describe secret - * - * 2. Start the kubectl proxt: - * - * $ kubectl proxy - * - * 3. Open `http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/` in a - * web browser. - * 4. Choose `Token` authentication, paste the token retrieved earlier into the `Token` field, and sign in. - */ - deployDashboard?: boolean; -} - -// createStorageClass creates a single StorageClass from the given inputs. -function createStorageClass(name: string, storageClass: EKSStorageClass, opts: pulumi.CustomResourceOptions) { - // Compute the storage class's metadata, including its name and default storage class annotation. - const metadata = pulumi.all([storageClass.metadata || {}, storageClass.default]) - .apply(([m, isDefault]) => { - m.name = m.name || name; - if (isDefault) { - m.annotations = { ...m.annotations || {}, "storageclass.kubernetes.io/is-default-class":"true" }; - } - return m; - }); - - // Figure out the parameters for the storage class. 
- const parameters: { [key: string]: pulumi.Input } = { - "type": storageClass.type, - }; - if (storageClass.zones) { - parameters["zones"] = pulumi.output(storageClass.zones).apply(v => v.join(", ")); - } - if (storageClass.iopsPerGb) { - parameters["iopsPerGb"] = pulumi.output(storageClass.iopsPerGb).apply(v => `${v}`); - } - if (storageClass.encrypted) { - parameters["encrypted"] = pulumi.output(storageClass.encrypted).apply(v => `${v}`); - } - if (storageClass.kmsKeyId) { - parameters["kmsKeyId"] = storageClass.kmsKeyId; - } - - new k8s.storage.v1.StorageClass(name, { - metadata: metadata, - provisioner: "kubernetes.io/aws-ebs", - parameters: parameters, - allowVolumeExpansion: storageClass.allowVolumeExpansion, - mountOptions: storageClass.mountOptions, - reclaimPolicy: storageClass.reclaimPolicy, - volumeBindingMode: storageClass.volumeBindingMode, - }, opts); -} - -/** - * EKSCluster is a component that wraps the AWS and Kubernetes resources necessary to run an EKS cluster, its worker - * nodes, its optional StorageClasses, and an optional deployment of the Kubernetes Dashboard. - */ -export class EKSCluster extends pulumi.ComponentResource { - /** - * A kubeconfig that can be used to connect to the EKS cluster. This must be serialized as a string before passing - * to the Kubernetes provider. - */ - public readonly kubeconfig: pulumi.Output; - - /** - * Create a new EKS cluster with worker nodes, optional storage classes, and deploy the Kubernetes Dashboard if - * requested. - * - * @param name The _unique_ name of this component. - * @param args The arguments for this cluster. - * @param opts A bag of options that control this copmonent's behavior. - */ - constructor(name: string, args: EKSClusterOptions, opts?: pulumi.ComponentResourceOptions) { - super("EKSCluster", name, args, opts); - - // Create the EKS service role - const eksRole = new ServiceRole("eksRole", { - service: "eks.amazonaws.com", - description: "Allows EKS to manage clusters on your behalf.", - managedPolicyArns: [ - "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy", - "arn:aws:iam::aws:policy/AmazonEKSServicePolicy", - ], - }, { parent: this }); - - // Create the EKS cluster security group - const allEgress = { - description: "Allow internet access.", - fromPort: 0, - toPort: 0, - protocol: "-1", // all - cidrBlocks: [ "0.0.0.0/0" ], - }; - const eksClusterSecurityGroup = new aws.ec2.SecurityGroup("eksClusterSecurityGroup", { - vpcId: args.vpcId, - egress: [ allEgress ], - }, { parent: this }); - - // Create the EKS cluster - const eksCluster = new aws.eks.Cluster("eksCluster", { - roleArn: eksRole.role.apply(r => r.arn), - vpcConfig: { securityGroupIds: [ eksClusterSecurityGroup.id ], subnetIds: args.subnetIds }, - }, { parent: this }); - - // Create the instance role we'll use for worker nodes. - const instanceRole = new ServiceRole("instanceRole", { - service: "ec2.amazonaws.com", - managedPolicyArns: [ - "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", - "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", - "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", - ], - }, { parent: this }); - const instanceRoleARN = instanceRole.role.apply(r => r.arn); - - // Compute the required kubeconfig. Note that we do not export this value: we want the exported config to - // depend on the autoscaling group we'll create later so that nothing attempts to use the EKS cluster before - // its worker nodes have come up. 
- const myKubeconfig = pulumi.all([eksCluster.name, eksCluster.endpoint, eksCluster.certificateAuthority]) - .apply(([clusterName, clusterEndpoint, clusterCertificateAuthority]) => { - return { - apiVersion: "v1", - clusters: [{ - cluster: { - server: clusterEndpoint, - "certificate-authority-data": clusterCertificateAuthority.data, - }, - name: "kubernetes", - }], - contexts: [{ - context: { - cluster: "kubernetes", - user: "aws", - }, - name: "aws", - }], - "current-context": "aws", - kind: "Config", - users: [{ - name: "aws", - user: { - exec: { - apiVersion: "client.authentication.k8s.io/v1alpha1", - command: "aws-iam-authenticator", - args: ["token", "-i", clusterName], - }, - }, - }], - }; - }); - - // Create the Kubernetes provider we'll use to manage the config map we need to allow worker nodes to access - // the EKS cluster. - const k8sProvider = new k8s.Provider("eks-k8s", { - kubeconfig: myKubeconfig.apply(JSON.stringify), - }, { parent: this }); - - // Enable access to the EKS cluster for worker nodes. - const eksNodeAccess = new k8s.core.v1.ConfigMap("nodeAccess", { - apiVersion: "v1", - metadata: { - name: "aws-auth", - namespace: "kube-system", - }, - data: { - mapRoles: instanceRoleARN.apply(arn => `- rolearn: ${arn}\n username: system:node:{{EC2PrivateDNSName}}\n groups:\n - system:bootstrappers\n - system:nodes\n`), - }, - }, { parent: this, provider: k8sProvider }); - - // Add any requested StorageClasses. - if (args.storageClasses) { - if (typeof args.storageClasses === "string") { - const storageClass = { type: args.storageClasses, default: true }; - createStorageClass(args.storageClasses, storageClass, { parent: this, provider: k8sProvider }); - } else { - for (const name of Object.keys(args.storageClasses)) { - createStorageClass(name, args.storageClasses[name], { parent: this, provider: k8sProvider }); - } - } - } - - // Create the cluster's worker nodes. 
- const instanceProfile = new aws.iam.InstanceProfile("instanceProfile", { - role: instanceRole.role, - }, { parent: this }); - const instanceSecurityGroup = new aws.ec2.SecurityGroup("instanceSecurityGroup", { - vpcId: args.vpcId, - ingress: [ - { - description: "Allow nodes to communicate with each other", - fromPort: 0, - toPort: 0, - protocol: "-1", // all - self: true, - }, - { - description: "Allow worker Kubelets and pods to receive communication from the cluster control plane", - fromPort: 1025, - toPort: 65535, - protocol: "tcp", - securityGroups: [ eksClusterSecurityGroup.id ], - }, - ], - egress: [ allEgress ], - tags: eksCluster.name.apply(n => { - [`kubernetes.io/cluster/${n}`]: "owned", - }), - }, { parent: this }); - const eksClusterIngressRule = new aws.ec2.SecurityGroupRule("eksClusterIngressRule", { - description: "Allow pods to communicate with the cluster API Server", - type: "ingress", - fromPort: 443, - toPort: 443, - protocol: "tcp", - securityGroupId: eksClusterSecurityGroup.id, - sourceSecurityGroupId: instanceSecurityGroup.id, - }, { parent: this }); - const instanceSecurityGroupId = pulumi.all([instanceSecurityGroup.id, eksClusterIngressRule.id]) - .apply(([instanceSecurityGroupId]) => instanceSecurityGroupId); - - const awsRegion = pulumi.output(aws.getRegion({}, { parent: this })); - const userdata = pulumi.all([awsRegion, eksCluster.name, eksCluster.endpoint, eksCluster.certificateAuthority]) - .apply(([region, clusterName, clusterEndpoint, clusterCertificateAuthority]) => { - return `#!/bin/bash -xe - -CA_CERTIFICATE_DIRECTORY=/etc/kubernetes/pki -CA_CERTIFICATE_FILE_PATH=$CA_CERTIFICATE_DIRECTORY/ca.crt -mkdir -p $CA_CERTIFICATE_DIRECTORY -echo "${clusterCertificateAuthority.data}" | base64 -d > $CA_CERTIFICATE_FILE_PATH -INTERNAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4) -sed -i s,MASTER_ENDPOINT,${clusterEndpoint},g /var/lib/kubelet/kubeconfig -sed -i s,CLUSTER_NAME,${clusterName},g /var/lib/kubelet/kubeconfig -sed -i s,REGION,${region.name},g /etc/systemd/system/kubelet.service -sed -i s,MAX_PODS,20,g /etc/systemd/system/kubelet.service -sed -i s,MASTER_ENDPOINT,${clusterEndpoint},g /etc/systemd/system/kubelet.service -sed -i s,INTERNAL_IP,$INTERNAL_IP,g /etc/systemd/system/kubelet.service -DNS_CLUSTER_IP=10.100.0.10 -if [[ $INTERNAL_IP == 10.* ]] ; then DNS_CLUSTER_IP=172.20.0.10; fi -sed -i s,DNS_CLUSTER_IP,$DNS_CLUSTER_IP,g /etc/systemd/system/kubelet.service -sed -i s,CERTIFICATE_AUTHORITY_FILE,$CA_CERTIFICATE_FILE_PATH,g /var/lib/kubelet/kubeconfig -sed -i s,CLIENT_CA_FILE,$CA_CERTIFICATE_FILE_PATH,g /etc/systemd/system/kubelet.service -systemctl daemon-reload -systemctl restart kubelet kube-proxy -`; - }); - const eksWorkerAmi = aws.getAmi({ - filters: [{ - name: "name", - values: [ "eks-worker-*" ], - }], - mostRecent: true, - owners: [ "602401143452" ], // Amazon - }, { parent: this }); - const instanceLaunchConfiguration = new aws.ec2.LaunchConfiguration("instanceLaunchConfiguration", { - associatePublicIpAddress: true, - imageId: eksWorkerAmi.then(r => r.imageId), - instanceType: args.instanceType, - iamInstanceProfile: instanceProfile.id, - securityGroups: [ instanceSecurityGroupId ], - userData: userdata, - }, { parent: this }); - const autoscalingGroup = new aws.autoscaling.Group("autoscalingGroup", { - desiredCapacity: args.desiredCapacity, - launchConfiguration: instanceLaunchConfiguration.id, - maxSize: args.maxSize, - minSize: args.minSize, - vpcZoneIdentifiers: args.subnetIds, - - tags: [ - { - key: 
eksCluster.name.apply(n => `kubernetes.io/cluster/${n}`), - value: "owned", - propagateAtLaunch: true, - }, - { - key: "Name", - value: eksCluster.name.apply(n => `${n}-worker`), - propagateAtLaunch: true, - } - ] - - }, { parent: this, dependsOn: eksNodeAccess }); - - // Export the cluster's kubeconfig with a dependency upon the cluster's autoscaling group. This will help - // ensure that the cluster's consumers do not attempt to use the cluster until its workers are attached. - this.kubeconfig = pulumi.all([autoscalingGroup.id, myKubeconfig]).apply(([_, kubeconfig]) => kubeconfig); - - // If we need to deploy the Kubernetes dashboard, do so now. - if (args.deployDashboard) { - // Deploy the dashboard and its dependencies. - const dashboard = new Dashboard("dashboard", { - parent: this, - dependsOn: autoscalingGroup, - providers: { kubernetes: k8sProvider }, - }); - - // Create a service account for admin access. - const adminAccount = new k8s.core.v1.ServiceAccount("eks-admin", { - metadata: { - name: "eks-admin", - namespace: "kube-system", - }, - }, { parent: this, dependsOn: autoscalingGroup, provider: k8sProvider }); - - // Create a role binding for the admin account. - const adminRoleBinding = new k8s.rbac.v1.ClusterRoleBinding("eks-admin", { - metadata: { - name: "eks-admin", - }, - roleRef: { - apiGroup: "rbac.authorization.k8s.io", - kind: "ClusterRole", - name: "cluster-admin", - }, - subjects: [{ - kind: "ServiceAccount", - name: "eks-admin", - namespace: "kube-system", - }], - }, { parent: this, dependsOn: autoscalingGroup, provider: k8sProvider }); - } - - this.registerOutputs({ kubeconfig: this.kubeconfig }); - } -} diff --git a/aws-ts-eks/dashboard.ts b/aws-ts-eks/dashboard.ts deleted file mode 100644 index b90c45b06..000000000 --- a/aws-ts-eks/dashboard.ts +++ /dev/null @@ -1,315 +0,0 @@ -import * as pulumi from "@pulumi/pulumi"; -import * as k8s from "@pulumi/kubernetes"; - -/** - * The Dashboard component creates a deployment of the Kubernetes Dashboard using the best practices listed at - * https://docs.aws.amazon.com/eks/latest/userguide/dashboard-tutorial.html. - */ -export class Dashboard extends pulumi.ComponentResource { - /** - * Creates an instance of the Dashboard component. - * - * @param name The _unique_ name of the component. - * @param opts A bag of options that control the behavior of this component and its children. - */ - constructor(name: string, opts?: pulumi.ComponentResourceOptions) { - super("Dashboard", name, {}, opts); - - // Create the dashboard secret - const dashboard = new k8s.core.v1.Secret("dashboard-certs", { - metadata: { - labels: { "k8s-app": "kubernetes-dashboard" }, - name: "kubernetes-dashboard-certs", - namespace: "kube-system", - }, - type: "Opaque", - }, { parent: this }); - - // Create the dashboard service account - const dashboardAccount = new k8s.core.v1.ServiceAccount("kubernetes-dashboard", { - metadata: { - labels: { "k8s-app": "kubernetes-dashboard" }, - name: "kubernetes-dashboard", - namespace: "kube-system", - }, - }, { parent: this }); - - // Dashboard role and role binding - const dashboardRole = new k8s.rbac.v1.Role("kubernetes-dashboard-minimal", { - metadata: { - name: "kubernetes-dashboard-minimal", - namespace: "kube-system", - }, - rules: [ - { - // Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret. - apiGroups: [ "" ], - resources: [ "secrets" ], - verbs: [ "create" ], - }, - { - // Allow Dashboard to create 'kubernetes-dashboard-settings' config map. 
- apiGroups: [ "" ], - resources: [ "configmaps" ], - verbs: [ "create" ], - }, - { - // Allow Dashboard to get, update and delete Dashboard exclusive secrets. - apiGroups: [ "" ], - resources: [ "secrets" ], - resourceNames: [ "kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs" ], - verbs: [ "get", "update", "delete" ], - }, - { - // Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. - apiGroups: [ "" ], - resources: [ "configmaps" ], - resourceNames: [ "kubernetes-dashboard-settings" ], - verbs: [ "get", "update" ], - }, - { - // Allow Dashboard to get metrics from heapster. - apiGroups: [ "" ], - resources: [ "services" ], - resourceNames: [ "heapster" ], - verbs: [ "proxy" ], - }, - { - apiGroups: [ "" ], - resources: [ "services/proxy" ], - resourceNames: [ "heapster", "http:heapster:", "https:heapster:" ], - verbs: [ "get" ], - }, - ], - }, { parent: this }); - const dashboardRoleBinding = new k8s.rbac.v1.RoleBinding("kubernetes-dashboard-minimal", { - metadata: { - name: "kubernetes-dashboard-minimal", - namespace: "kube-system", - }, - roleRef: { - apiGroup: "rbac.authorization.k8s.io", - kind: "Role", - name: "kubernetes-dashboard-minimal", - }, - subjects: [{ - kind: "ServiceAccount", - name: "kubernetes-dashboard", - namespace: "kube-system", - }], - }, { parent: this }); - - // Dashboard deployment - const dashboardDeployment = new k8s.apps.v1.Deployment("kubernetes-dashboard", { - metadata: { - labels: { "k8s-app": "kubernetes-dashboard" }, - name: "kubernetes-dashboard", - namespace: "kube-system", - }, - spec: { - replicas: 1, - revisionHistoryLimit: 10, - selector: { - matchLabels: { "k8s-app": "kubernetes-dashboard" } - }, - template: { - metadata: { - labels: { "k8s-app": "kubernetes-dashboard" }, - }, - spec: { - containers: [{ - name: "kubernetes-dashboard", - image: "k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3", - ports: [{ - containerPort: 8443, - protocol: "TCP", - }], - args: [ "--auto-generate-certificates" ], - volumeMounts: [ - { - name: "kubernetes-dashboard-certs", - mountPath: "/certs", - }, - { - // Create on-disk volume to store exec logs - name: "tmp-volume", - mountPath: "/tmp", - }, - ], - livenessProbe: { - httpGet: { - scheme: "HTTPS", - path: "/", - port: 8443, - }, - initialDelaySeconds: 30, - timeoutSeconds: 30, - }, - }], - volumes: [ - { - name: "kubernetes-dashboard-certs", - secret: { secretName: "kubernetes-dashboard-certs" }, - }, - { - name: "tmp-volume", - emptyDir: {}, - }, - ], - serviceAccountName: "kubernetes-dashboard", - tolerations: [{ - key: "node-role.kubernetes.io/master", - effect: "NoSchedule", - }], - }, - }, - }, - }, { parent: this }); - - // Dashboard service - const dashboardService = new k8s.core.v1.Service("kubernetes-dashboard", { - metadata: { - labels: { "k8s-app": "kubernetes-dashboard" }, - name: "kubernetes-dashboard", - namespace: "kube-system", - }, - spec: { - ports: [{ - port: 443, - targetPort: 8443, - }], - selector: { "k8s-app": "kubernetes-dashboard" }, - }, - }, { parent: this }); - - // Heapster service account - const heapsterAccount = new k8s.core.v1.ServiceAccount("heapster", { - metadata: { - name: "heapster", - namespace: "kube-system", - }, - }, { parent: this }); - - // Heapster deployment - const heapsterDeployment = new k8s.apps.v1beta1.Deployment("kubernetes-heapster", { - metadata: { - name: "heapster", - namespace: "kube-system", - }, - spec: { - replicas: 1, - template: { - metadata: { - labels: { - "task": "monitoring", - "k8s-app": "heapster", - }, - }, 
- spec: { - containers: [{ - name: "heapster", - image: "k8s.gcr.io/heapster-amd64:v1.5.4", - imagePullPolicy: "IfNotPresent", - command: [ - "/heapster", - "--source=kubernetes:https://kubernetes.default", - "--sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086", - ], - }], - serviceAccountName: "heapster", - }, - }, - }, - }, { parent: this }); - - // Heapster service - const heapsterService = new k8s.core.v1.Service("heapster", { - metadata: { - labels: { - "task": "monitoring", - "kubernetes.io/cluster-service": "true", - "kubernetes.io/name": "Heapster", - }, - name: "heapster", - namespace: "kube-system", - }, - spec: { - ports: [{ - port: 80, - targetPort: 8082, - }], - selector: { "k8s-app": "heapster" }, - }, - }, { parent: this }); - - // influxdb deployment - const influxdbDeployment = new k8s.apps.v1beta1.Deployment("monitoring-influxdb", { - metadata: { - name: "monitoring-influxdb", - namespace: "kube-system", - }, - spec: { - replicas: 1, - template: { - metadata: { - labels: { - "task": "monitoring", - "k8s-app": "influxdb", - }, - }, - spec: { - containers: [{ - name: "influxdb", - image: "k8s.gcr.io/heapster-influxdb-amd64:v1.5.2", - volumeMounts: [{ - mountPath: "/data", - name: "influxdb-storage", - }], - }], - volumes: [{ - name: "influxdb-storage", - emptyDir: {}, - }], - }, - }, - }, - }, { parent: this }); - - // influxdb service - const influxdbService = new k8s.core.v1.Service("monitoring-influxdb", { - metadata: { - labels: { - "task": "monitoring", - "kubernetes.io/cluster-service": "true", - "kubernetes.io/name": "monitoring-influxdb", - }, - name: "monitoring-influxdb", - namespace: "kube-system", - }, - spec: { - ports: [{ - port: 8086, - targetPort: 8086, - }], - selector: { "k8s-app": "influxdb" }, - }, - }, { parent: this }); - - // influxdb role binding - const influxdbRoleBinding = new k8s.rbac.v1.ClusterRoleBinding("heapster", { - metadata: { - name: "heapster", - }, - roleRef: { - apiGroup: "rbac.authorization.k8s.io", - kind: "ClusterRole", - name: "system:heapster", - }, - subjects: [{ - kind: "ServiceAccount", - name: "heapster", - namespace: "kube-system", - }], - }, { parent: this }); - } -} diff --git a/aws-ts-eks/index.ts b/aws-ts-eks/index.ts index d71853f6f..eb309c909 100644 --- a/aws-ts-eks/index.ts +++ b/aws-ts-eks/index.ts @@ -1,21 +1,14 @@ -import * as pulumi from "@pulumi/pulumi"; -import * as aws from "@pulumi/aws"; import * as awsinfra from "@pulumi/aws-infra"; -import * as k8s from "@pulumi/kubernetes"; - -import { EKSCluster } from "./cluster"; - -const config = new pulumi.Config(); -const instanceType = (config.get("instanceType") || "t2.medium") as aws.ec2.InstanceType; +import * as eks from "@pulumi/eks"; // Create a VPC for our cluster. -const network = new awsinfra.Network("eksNetwork"); +const vpc = new awsinfra.Network("vpc"); // Create the EKS cluster itself, including a "gp2"-backed StorageClass and a dpeloyment of the Kubernetes dashboard. 
-const cluster = new EKSCluster("eksCluster", { +const cluster = new EKSCluster("cluster", { vpcId: network.vpcId, subnetIds: network.subnetIds, - instanceType: instanceType, + instanceType: "t2.medium", desiredCapacity: 2, minSize: 1, maxSize: 2, diff --git a/aws-ts-eks/package.json b/aws-ts-eks/package.json index 0d05ee9aa..3a63f69e9 100644 --- a/aws-ts-eks/package.json +++ b/aws-ts-eks/package.json @@ -6,8 +6,8 @@ }, "dependencies": { "@pulumi/pulumi": "^0.15.0", - "@pulumi/aws": "^0.15.0", "@pulumi/aws-infra": "^0.15.0", - "@pulumi/kubernetes": "^0.15.0" + "@pulumi/eks": "^0.15.0-dev", + "sync-request": "^6.0.0" } } diff --git a/aws-ts-eks/serviceRole.ts b/aws-ts-eks/serviceRole.ts deleted file mode 100644 index 14d0e8481..000000000 --- a/aws-ts-eks/serviceRole.ts +++ /dev/null @@ -1,76 +0,0 @@ -import * as pulumi from "@pulumi/pulumi"; -import * as aws from "@pulumi/aws"; -import * as crypto from "crypto"; - -// sha1hash returns a partial SHA1 hash of the input string. -function sha1hash(s: string): string { - const shasum: crypto.Hash = crypto.createHash("sha1"); - shasum.update(s); - // Limit the size of hashes to ensure we generate shorter/ resource names. - return shasum.digest("hex").substring(0, 8); -} - -/** - * ServiceRoleArgs describe the parameters to a ServiceRole component. - */ -export interface ServiceRoleArgs { - /** - * The service associated with this role. - */ - readonly service: pulumi.Input; - /** - * The description of the role. - */ - readonly description?: pulumi.Input; - /** - * One or more managed policy ARNs to attach to this role. - */ - readonly managedPolicyArns?: string[]; -} - -/** - * The ServiceRole component creates an IAM role for a particular service and attaches to it a list of well-known - * managed policies. - */ -export class ServiceRole extends pulumi.ComponentResource { - // The service role. - public readonly role: pulumi.Output; - - /** - * Create a new ServiceRole. - * - * @param name The _unique_ name of this component. - * @param args The arguments for this cluster. - * @param opts A bag of options that control this copmonent's behavior. - */ - constructor(name: string, args: ServiceRoleArgs, opts?: pulumi.ResourceOptions) { - super("ServiceRole", name, args, opts); - - const assumeRolePolicy = pulumi.output(args.service).apply(service => JSON.stringify({ - Version: "2012-10-17", - Statement: [{ - Action: [ - "sts:AssumeRole", - ], - Effect: "Allow", - Principal: { - Service: [ service ], - }, - }], - })); - const role = new aws.iam.Role(`${name}-role`, { - description: args.description, - assumeRolePolicy: assumeRolePolicy, - }, { parent: this }); - const rolePolicyAttachments = []; - for (const policy of (args.managedPolicyArns || [])) { - rolePolicyAttachments.push(new aws.iam.RolePolicyAttachment(`${name}-${sha1hash(policy)}`, { - policyArn: policy, - role: role, - }, { parent: this })); - } - this.role = pulumi.all([role.arn, ...rolePolicyAttachments.map(r => r.id)]).apply(() => role); - - this.registerOutputs({ role: this.role }); - } -} From f10015cd87e96356d7291a5b73ab3236b066e14a Mon Sep 17 00:00:00 2001 From: Pat Gavlin Date: Fri, 17 Aug 2018 09:55:10 -0700 Subject: [PATCH 2/4] Remove a package reference. 
--- aws-ts-eks/package.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/aws-ts-eks/package.json b/aws-ts-eks/package.json index 3a63f69e9..5a2b3bd4c 100644 --- a/aws-ts-eks/package.json +++ b/aws-ts-eks/package.json @@ -7,7 +7,6 @@ "dependencies": { "@pulumi/pulumi": "^0.15.0", "@pulumi/aws-infra": "^0.15.0", - "@pulumi/eks": "^0.15.0-dev", - "sync-request": "^6.0.0" + "@pulumi/eks": "^0.15.0-dev" } } From b55e67a7ecb891f00b61b8bd0b51a7cb11c756c6 Mon Sep 17 00:00:00 2001 From: Pat Gavlin Date: Fri, 17 Aug 2018 10:05:12 -0700 Subject: [PATCH 3/4] Fix some typos. --- aws-ts-eks/index.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/aws-ts-eks/index.ts b/aws-ts-eks/index.ts index eb309c909..10b816993 100644 --- a/aws-ts-eks/index.ts +++ b/aws-ts-eks/index.ts @@ -5,9 +5,9 @@ import * as eks from "@pulumi/eks"; const vpc = new awsinfra.Network("vpc"); // Create the EKS cluster itself, including a "gp2"-backed StorageClass and a dpeloyment of the Kubernetes dashboard. -const cluster = new EKSCluster("cluster", { - vpcId: network.vpcId, - subnetIds: network.subnetIds, +const cluster = new eks.Cluster("cluster", { + vpcId: vpc.vpcId, + subnetIds: vpc.subnetIds, instanceType: "t2.medium", desiredCapacity: 2, minSize: 1, From 031ca5731423f41d2ad1b527dcb48e5c580fe889 Mon Sep 17 00:00:00 2001 From: Pat Gavlin Date: Fri, 17 Aug 2018 10:17:24 -0700 Subject: [PATCH 4/4] Explicitly reference "dev". --- aws-ts-eks/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws-ts-eks/package.json b/aws-ts-eks/package.json index 5a2b3bd4c..25b160499 100644 --- a/aws-ts-eks/package.json +++ b/aws-ts-eks/package.json @@ -7,6 +7,6 @@ "dependencies": { "@pulumi/pulumi": "^0.15.0", "@pulumi/aws-infra": "^0.15.0", - "@pulumi/eks": "^0.15.0-dev" + "@pulumi/eks": "dev" } }
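For reference, here is a sketch of what the example's index.ts looks like once all four patches are applied, reconstructed only from the hunks shown above (any trailing options and exports from the original example that are not visible in the diffs are assumed to carry over unchanged):

    import * as awsinfra from "@pulumi/aws-infra";
    import * as eks from "@pulumi/eks";

    // Create a VPC for our cluster.
    const vpc = new awsinfra.Network("vpc");

    // Create the EKS cluster itself, including a "gp2"-backed StorageClass and a deployment of the
    // Kubernetes dashboard.
    const cluster = new eks.Cluster("cluster", {
        vpcId: vpc.vpcId,
        subnetIds: vpc.subnetIds,
        instanceType: "t2.medium",
        desiredCapacity: 2,
        minSize: 1,
        maxSize: 2,
        // Any remaining options from the original example (e.g. storage classes, dashboard) are not
        // visible in the hunks above and are omitted here.
    });

Note that patch 4 pins "@pulumi/eks" to "dev" rather than a semver range, presumably so the example always tracks the latest development build of the package.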