diff --git a/gcp-py-gke/.gitignore b/gcp-py-gke/.gitignore new file mode 100644 index 000000000..ae3c17260 --- /dev/null +++ b/gcp-py-gke/.gitignore @@ -0,0 +1 @@ +/bin/ diff --git a/gcp-py-gke/Pulumi.yaml b/gcp-py-gke/Pulumi.yaml new file mode 100644 index 000000000..416b7e6a5 --- /dev/null +++ b/gcp-py-gke/Pulumi.yaml @@ -0,0 +1,3 @@ +name: gcp-py-gke +description: A Google Kubernetes Engine (GKE) cluster, with canary deployment +runtime: python diff --git a/gcp-py-gke/README.md b/gcp-py-gke/README.md new file mode 100644 index 000000000..4a3842229 --- /dev/null +++ b/gcp-py-gke/README.md @@ -0,0 +1,99 @@ +[![Deploy](https://get.pulumi.com/new/button.svg)](https://app.pulumi.com/new) + +# Google Kubernetes Engine (GKE) with a Canary Deployment + +This example provisions a [Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine/) cluster, using +infrastructure-as-code, and then deploys a Kubernetes Deployment into it, to test that the cluster is working. This +demonstrates that you can manage both the Kubernetes objects themselves, in addition to underlying cloud infrastructure, +using a single configuration language (in this case, Python), tool, and workflow. + +# Prerequisites + +Ensure you have [Python 3](https://www.python.org/downloads/) and [the Pulumi CLI](https://pulumi.io/install). + +We will be deploying to Google Cloud Platform (GCP), so you will need an account. If you don't have an account, +[sign up for free here](https://cloud.google.com/free/). In either case, +[follow the instructions here](https://pulumi.io/quickstart/gcp/setup.html) to connect Pulumi to your GCP account. + +This example assumes that you have GCP's `gcloud` CLI on your path. This is installed as part of the +[GCP SDK](https://cloud.google.com/sdk/). + +# Running the Example + +After cloning this repo, `cd` into it and run these commands. A GKE Kubernetes cluster will appear! + +1. 
Create a new stack, which is an isolated deployment target for this example:

    ```bash
    $ pulumi stack init dev
    ```

2. Set the required configuration variables for this program:

    ```bash
    $ pulumi config set gcp:project [your-gcp-project-here]
    $ pulumi config set gcp:zone us-west1-a # any valid GCP zone here
    $ pulumi config set password --secret [your-cluster-password-here]
    ```

    By default, your cluster will have 3 nodes of type `n1-standard-1`. This is configurable, however; for instance
    if we'd like to choose 5 nodes of type `n1-standard-2` instead, we can run these commands:

    ```bash
    $ pulumi config set node_count 5
    $ pulumi config set node_machine_type n1-standard-2
    ```

    This shows how stacks can be configurable in useful ways. You can even change these after provisioning.

3. Deploy everything with the `pulumi up` command. This provisions all the GCP resources necessary, including
   your GKE cluster itself, and then deploys a Kubernetes Deployment running nginx, all in a single gesture:

    ```bash
    $ pulumi up
    ```

    This will show you a preview, ask for confirmation, and then chug away at provisioning your cluster:

    ```
    Updating stack 'gcp-py-gke-dev'
    Performing changes:

        Type                            Name             Plan
    +   pulumi:pulumi:Stack             gcp-py-gke-dev   create
    +   ├─ gcp:container:Cluster        gke-cluster      create
    +   ├─ pulumi:providers:kubernetes  gkeK8s           create
    +   └─ kubernetes:apps:Deployment   canary           create

    ---outputs:---
    kubeconfig: "apiVersion: v1\n..."

    info: 4 changes updated:
        + 4 resources created
    Update duration: 2m07.424737735s
    ```

    After about two minutes, your cluster will be ready, and its kubeconfig will be printed.

4. From here, you may take this config and use it either in your `~/.kube/config` file, or just by saving it
   locally and plugging it into the `KUBECONFIG` envvar. All of your usual `gcloud` commands will work too, of course.
+ + For instance: + + ```bash + $ pulumi stack output kubeconfig > kubeconfig.yaml + $ KUBECONFIG=./kubeconfig.yaml kubectl get po + NAME READY STATUS RESTARTS AGE + canary-n7wfhtrp-fdbfd897b-lrm58 1/1 Running 0 58s + ``` + +5. At this point, you have a running cluster. Feel free to modify your program, and run `pulumi up` to redeploy changes. + The Pulumi CLI automatically detects what has changed and makes the minimal edits necessary to accomplish these + changes. This could be altering the existing chart, adding new GCP or Kubernetes resources, or anything, really. + +6. Once you are done, you can destroy all of the resources, and the stack: + + ```bash + $ pulumi destroy + $ pulumi stack rm + ``` diff --git a/gcp-py-gke/__main__.py b/gcp-py-gke/__main__.py new file mode 100644 index 000000000..0d8f20b61 --- /dev/null +++ b/gcp-py-gke/__main__.py @@ -0,0 +1,84 @@ +from pulumi import Config, export, get_project, get_stack, Output, ResourceOptions +from pulumi_gcp.config import project, zone +from pulumi_gcp.container import Cluster +from pulumi_kubernetes import Provider +from pulumi_kubernetes.apps.v1 import Deployment + +# Read in some configurable settings for our cluster: +config = Config(None) + +# nodeCount is the number of cluster nodes to provision. Defaults to 3 if unspecified. +NODE_COUNT = config.get('node_count') or 3 +# nodeMachineType is the machine type to use for cluster nodes. Defaults to n1-standard-1 if unspecified. +# See https://cloud.google.com/compute/docs/machine-types for more details on available machine types. +NODE_MACHINE_TYPE = config.get('node_machine_type') or 'n1-standard-1' +# username is the admin username for the cluster. +USERNAME = config.get('username') or 'admin' +# password is the password for the admin user in the cluster. +PASSWORD = config.require('password') + +# Now, actually create the GKE cluster. 
+k8s_cluster = Cluster('gke-cluster', + initial_node_count=NODE_COUNT, + node_version='latest', + min_master_version='latest', + master_auth={ 'username': USERNAME, 'password': PASSWORD }, + node_config={ + 'machine_type': NODE_MACHINE_TYPE, + 'oauth_scopes': [ + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/devstorage.read_only', + 'https://www.googleapis.com/auth/logging.write', + 'https://www.googleapis.com/auth/monitoring' + ], + }, +) + +# Manufacture a GKE-style Kubeconfig. Note that this is slightly "different" because of the way GKE requires +# gcloud to be in the picture for cluster authentication (rather than using the client cert/key directly). +k8s_info = Output.all(k8s_cluster.name, k8s_cluster.endpoint, k8s_cluster.master_auth) +k8s_config = k8s_info.apply( + lambda info: """apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: {0} + server: https://{1} + name: {2} +contexts: +- context: + cluster: {2} + user: {2} + name: {2} +current-context: {2} +kind: Config +preferences: {{}} +users: +- name: {2} + user: + auth-provider: + config: + cmd-args: config config-helper --format=json + cmd-path: gcloud + expiry-key: '{{.credential.token_expiry}}' + token-key: '{{.credential.access_token}}' + name: gcp +""".format(info[2]['clusterCaCertificate'], info[1], '{0}_{1}_{2}'.format(project, zone, info[0]))) + +# Make a Kubernetes provider instance that uses our cluster from above. +k8s_provider = Provider('gke_k8s', kubeconfig=k8s_config) + +# Create a canary deployment to test that this cluster works. 
+labels = { 'app': 'canary-{0}-{1}'.format(get_project(), get_stack()) } +canary = Deployment('canary', + spec={ + 'selector': { 'matchLabels': labels }, + 'replicas': 1, + 'template': { + 'metadata': { 'labels': labels }, + 'spec': { 'containers': [{ 'name': 'nginx', 'image': 'nginx' }] }, + }, + }, __opts__=ResourceOptions(provider=k8s_provider) +) + +# Finally, export the kubeconfig so that the client can easily access the cluster. +export('kubeconfig', k8s_config) diff --git a/gcp-py-gke/requirements.txt b/gcp-py-gke/requirements.txt new file mode 100644 index 000000000..96481a204 --- /dev/null +++ b/gcp-py-gke/requirements.txt @@ -0,0 +1,3 @@ +pulumi>=0.17.4 +pulumi_gcp>=0.18.2 +pulumi_kubernetes>=0.22.0