Commit

Initial Azure k8s orchestration/automation
  - creates an Azure k8s/AKS cluster with a variable number of nodes
    using a single nodepool. There is currently no VM config/kernel-tweaks
    applied to the VMs.
JONBRWN committed Aug 13, 2020
1 parent b03be2c commit fe33ba3
Showing 7 changed files with 262 additions and 2 deletions.
51 changes: 50 additions & 1 deletion orchestration/pulumi/Makefile
@@ -29,6 +29,14 @@ nsg_name="$(cluster_name)-$(location)_nsg"
vmss_name="$(cluster_name)-$(location)_vmss"
vm_name="$(cluster_name)-$(location)_vm"
ppg_name="$(cluster_name)-$(location)_ppg"
app_name="$(cluster_name)-k8s-$(location)_app"
np_name="default"
sp_name="$(cluster_name)-k8s-$(location)_sp"
k8s_dns_prefix="$(cluster_name)-k8s-$(location)"

k8s_network_policy ?= "calico"## Network Policy to use for k8s
kubernetes_version ?= "1.18.4"## Kubernetes version to use
cluster_os_image?="Canonical:UbuntuServer:18.04-LTS:latest" ## Image to use for cluster
custom_os_image ?= ## Custom OS Image to use for vm creation

@@ -40,7 +48,7 @@ ansible_user ?= ubuntu
ansible_password ?= ubuntu
all_instances := 'wallaroo-*'
follower_instances := 'wallaroo-followers'
ssh_path?="/home/${ansible_user}/.ssh/authorized_keys"
ssh_path ?="/home/${ansible_user}/.ssh/authorized_keys"

sync_dir ?= ## Directory to sync to cluster
dest_dir ?= ## Directory on the cluster to place the synced directory in
@@ -239,13 +247,21 @@ init-pulumi-cluster-stack: check-cluster-name ## Initialize pulumi cluster stack
@echo "\033[36m==> Successfully confirmed Cluster Pulumi Stack is created for location \
'$(location)' at provider '$(provider)'!\033[0m"


init-pulumi-vm-stack: check-cluster-name ## Initialize pulumi vnet stack
@echo "\033[36m==> Confirming Virtual Machine Pulumi Stack is created for location \
'$(location)' at provider '$(provider)'...\033[0m"
cd $(provider)-vm && pulumi stack init '$(cluster_name)-$(location)'
@echo "\033[36m==> Successfully confirmed Virtual Machine Pulumi Stack is created for location \
'$(location)' at provider '$(provider)'!\033[0m"

init-pulumi-k8s-stack: check-cluster-name ## Initialize pulumi k8s cluster stack
@echo "\033[36m==> Confirming k8s Cluster Pulumi Stack is created for location \
'$(location)' at provider '$(provider)'...\033[0m"
cd $(provider)-k8s && pulumi stack init '$(cluster_name)-$(location)'
@echo "\033[36m==> Successfully confirmed k8s Cluster Pulumi Stack is created for location \
'$(location)' at provider '$(provider)'!\033[0m"

create-vnet: check-cluster-name ## Create vnet
@echo "vnet name: '$(vnet_name)'"
@echo "\033[36m==> Confirming Virtual Network is created in location \
@@ -302,6 +318,28 @@ create-vmss: check-cluster-name ## Create VMSS
@echo "\033[36m==> Successfully confirmed VM Scale Set is created in location \
'$(location)' at provider '$(provider)'!\033[0m"

create-k8s: check-cluster-name ## Create k8s cluster
@echo "\033[36m==> Confirming k8s cluster is created in location \
'$(location)' at provider '$(provider)'...\033[0m"
cd $(provider)-k8s && pulumi up -s '$(cluster_name)-$(location)' \
--config "$(provider)-k8s:location=$(location)" \
--config "$(provider)-k8s:resourcegroup-name=$(resource_group_name)" \
--config "$(provider)-k8s:aks-cluster-name=$(cluster_name)" \
--config "$(provider)-k8s:app-name=$(app_name)" \
--config "$(provider)-k8s:node-pool-name=$(np_name)" \
--config "$(provider)-k8s:sp-name=$(sp_name)" \
--config "$(provider)-k8s:kubernetes-version=$(kubernetes_version)" \
--config "$(provider)-k8s:dns-prefix=$(k8s_dns_prefix)" \
--config "$(provider)-k8s:network-policy=$(k8s_network_policy)" \
--config "$(provider)-k8s:node-count=$(num_instances_arg)" \
--config "$(provider)-k8s:vm-sku=$(vm_sku)" \
--config "$(provider)-k8s:username=$(ansible_user)" \
--config "$(provider)-k8s:ssh-key-data=$(shell cat ${cluster_pub})" \
--config "$(provider)-k8s:project-name=$(cluster_project_name)" \
--yes --non-interactive
@echo "\033[36m==> Successfully confirmed k8s is created in location \
'$(location)' at provider '$(provider)'!\033[0m"

import-vmss: check-cluster-name ## Import existing VMSS into Pulumi stack
import-vmss: vmss_id=$(shell az vmss show -g $(resource_group_name) -n $(vmss_name) -o json | jq '.id')
import-vmss:
@@ -333,6 +371,12 @@ destroy-vnet: check-cluster-name ## Destroy VNet in location
@echo "\033[36m==> Successfully confirmed VNet is destroyed in location \
'$(location)' at provider '$(provider)'!\033[0m"

destroy-k8s: check-cluster-name ## Destroy k8s cluster in location
@echo "\033[36m==> Destroyingk8s Cluster in location '$(location)' at provider '$(provider)'...\033[0m"
cd $(provider)-k8s && pulumi destroy -s '$(cluster_name)-$(location)' --yes
@echo "\033[36m==> Successfully confirmed k8s Cluster is destroyed in location \
'$(location)' at provider '$(provider)'!\033[0m"

destroy-vnet-state: check-cluster-name ## Destroy VNet Pulumi Stack state
@echo "\033[36m==> Destroying VNet Pulumi Stack state...\033[0m"
cd $(provider)-vnet && pulumi stack rm '$(cluster_name)-$(location)' --yes --non-interactive
@@ -354,6 +398,11 @@ destroy-vm-state: check-cluster-name ## Destroy Cluster Pulumi Stack state
cd $(provider)-vm && pulumi stack rm '$(cluster_name)-$(location)' --yes --non-interactive
@echo "\033[36m==> Virtual Machine Pulumi Stack state successfully destroyed!\033[0m"

destroy-k8s-state: check-cluster-name ## Destroy k8s Cluster Pulumi Stack state
@echo "\033[36m==> Destroying k8s Cluster Pulumi Stack state...\033[0m"
cd $(provider)-k8s && pulumi stack rm '$(cluster_name)-$(location)' --yes --non-interactive
@echo "\033[36m==> k8s Cluster Pulumi Stack state successfully destroyed!\033[0m"

check-cluster-name: ## Check for valid cluster name
$(if $(cluster_name),,$(error 'cluster_name' cannot be empty!))
$(if $(filter $(cluster_name),$(shell echo '$(cluster_name)' | sed 's/[^-a-zA-Z0-9]//g')),,$(error 'cluster_name' can only have [-a-zA-Z0-9] in it!))
18 changes: 17 additions & 1 deletion orchestration/pulumi/README.md
@@ -5,7 +5,7 @@ So far we have only implemented Azure as the provider.

## Modules

The two modules are `azure-vnet` and `azure-cluster`.
The three modules are `azure-vnet`, `azure-cluster`, and `azure-k8s`.

### Azure-VNet

@@ -23,6 +23,10 @@ hasn't been created yet.

The Azure VM module handles creating the single virtual machine in Azure and the related network components, etc. The state for this is stored in Pulumi. This module was designed for use in creating base images in Azure since this cannot be done via a VMSS.

### Azure-k8s

The k8s module handles creating the Azure AKS Kubernetes cluster along with the network policy, Service Principal, etc. The state for this is stored in Pulumi.

## Configuration

### General
@@ -147,6 +151,18 @@ Examples for orchestrating an Azure VM. This should primarily be used for OS ima
`eastus`:
`make generate-vm-inventory cluster_name=sample location=eastus`

#### k8s Examples

Currently, there is minimal configuration provided for spinning up an Azure AKS cluster. Below are the steps one can take to spin up and tear down a cluster; a sketch for connecting to the resulting cluster follows the list.

* Initialize Pulumi k8s Stack with the `cluster_name` of `testkube` in the default location: `make init-pulumi-k8s-stack cluster_name=testkube`

* Use Pulumi to create a k8s/AKS cluster with one virtual machine using the `Standard_F32s_v2` VM SKU in the default location: `make create-k8s cluster_name=testkube num_followers=0 vm_sku=Standard_F32s_v2`

* Use Pulumi to destroy the k8s/AKS cluster `testkube`: `make destroy-k8s cluster_name=testkube`

* Destroy the Pulumi stack state for the k8s/AKS cluster `testkube`: `make destroy-k8s-state cluster_name=testkube`
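
Once `create-k8s` completes, the target shown above does not pull a kubeconfig down for the new cluster. A minimal sketch for connecting to it with the Azure CLI and `kubectl`, assuming the `testkube` cluster from the examples above; the resource group placeholder must be replaced with whatever `resource_group_name` resolves to for your stack:

```bash
# Merge the AKS credentials for the new cluster into the local kubeconfig
# (substitute the resource group the cluster was created in)
az aks get-credentials --resource-group <resource_group_name> --name testkube

# Confirm the node pool came up and the nodes are Ready
kubectl get nodes -o wide
```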

## Debugging Ansible for Azure

Test ansible communication with the all cluster nodes:
2 changes: 2 additions & 0 deletions orchestration/pulumi/azure-k8s/.gitignore
@@ -0,0 +1,2 @@
node_modules/
package-lock.json
43 changes: 43 additions & 0 deletions orchestration/pulumi/azure-k8s/Pulumi.yaml
@@ -0,0 +1,43 @@
name: azure-k8s
runtime: nodejs
description: Azure k8s AKS Cluster and related Resources creation and management
template:
  config:
    azure-k8s:location:
      description: The Azure location to use (`eastus`, `eastus2`, `centralus`, `westus2`)
      default: eastus
    azure-k8s:resourcegroup-name:
      description: The Azure Resource Group name to use
      default: wallaroo-dev_rg
    azure-k8s:sp-name:
      description: The Azure Service Principal name to use
      default: wallaroo-dev_sp
    azure-k8s:app-name:
      description: The app name to use for the Service Principal
    azure-k8s:aks-cluster-name:
      description: The Azure AKS Cluster name to use
    azure-k8s:node-pool-name:
      description: The Azure AKS node pool name to use
      default: default
    azure-k8s:node-count:
      description: The Azure AKS node count to use
    azure-k8s:network-policy:
      description: The Azure AKS network policy to use (`azure`, `calico`)
      default: calico
    azure-k8s:ssh-key-data:
      description: The ssh key data to use
    azure-k8s:project-name:
      description: The project name for tagging purposes.
      default: orch-dev
    azure-k8s:vm-sku:
      description: The vm sku to use for the VMs.
    azure-k8s:dns-prefix:
      description: The dns prefix to use for the k8s cluster.
    azure-k8s:username:
      description: The username to use for the VMs.
      default: ubuntu
    azure-k8s:kubernetes-version:
      description: The kubernetes version to use.
      default: 1.18.4
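
These config keys are normally supplied by the top-level Makefile's `create-k8s` target via `--config` flags at `pulumi up` time. As a sketch, a single value can also be set directly on an existing stack with the Pulumi CLI, run from this `azure-k8s` directory; the stack name below follows the Makefile's `<cluster_name>-<location>` convention and is illustrative:

```bash
# Override a single config value on an existing stack
pulumi config set azure-k8s:kubernetes-version 1.18.4 --stack testkube-eastus

# Review the stack's effective configuration
pulumi config --stack testkube-eastus
```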
40 changes: 40 additions & 0 deletions orchestration/pulumi/azure-k8s/README.md
@@ -0,0 +1,40 @@
# Wallaroo Pulumi Orchestration - K8s Cluster module

This module consists of the orchestration for Wallaroo using Pulumi for the K8s Cluster.
So far we have only implemented Azure as a provider.
k8s Cluster creation and tear down are managed via the `orchestration/pulumi` [Makefile](../Makefile).

## Module details

The K8s Cluster module handles creating the K8s Cluster along with the network profile, service principal, etc. The state for this is stored in Pulumi.

The K8s Cluster module's state is used by the `cluster` module.

Files:

* `package.json` defines all the packages required to run
* `Pulumi.yaml` defines all the variable values used by this module
* `index.js` defines all the resources being created using the variables for properties as appropriate

## Info

This currently brings up a single node pool Kubernetes cluster using the [Pulumi 1.0.0 SDK KubernetesCluster](https://github.com/pulumi/pulumi-azure/blob/v1.0.0/sdk/nodejs/containerservice/kubernetesCluster.ts), which is derived from the [terraform azurerm_kubernetes_cluster](https://github.com/terraform-providers/terraform-provider-azurerm/blob/0b1449f2eba668775c41f015603b5f20aee36b17/website/docs/r/kubernetes_cluster.html.markdown).


## Installation

Run the following command to install the packages:

```bash
npm install
```
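
After the dependencies are installed, a quick sanity check is to preview what the module would create without applying anything. This assumes a stack has already been initialized and configured via the Makefile; the stack name below is illustrative:

```bash
# Show the resources Pulumi would create, without making any changes
pulumi preview --stack testkube-eastus
```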

## Pulumi Remote State

We're relying on Pulumi Remote State in order to store state in a centralized location.

The commands available are:

* `pulumi refresh` to refresh the local cache with remote state

Documentation for additional stack commands can be found at: https://www.pulumi.com/docs/reference/cli/pulumi_stack/
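
For example, assuming the `<cluster_name>-<location>` stack naming convention used by the Makefile, a typical sequence for selecting a stack, refreshing its state, and inspecting this module's outputs might look like the sketch below (run from this directory):

```bash
# Select the stack for the cluster
pulumi stack select testkube-eastus

# Reconcile the stack's state with the resources that actually exist in Azure
pulumi refresh

# Inspect an output exported by index.js (e.g. the AKS cluster name)
pulumi stack output aksClusterName
```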
99 changes: 99 additions & 0 deletions orchestration/pulumi/azure-k8s/index.js
@@ -0,0 +1,99 @@
"use strict";


const azure = require("@pulumi/azure");
const azuread = require("@pulumi/azuread");
const k8s = require("@pulumi/kubernetes");
const pulumi = require("@pulumi/pulumi");
const random = require("@pulumi/random");


let environment = "Development";
let config = new pulumi.Config();

let resourceGroupName = config.require("resourcegroup-name");
let location = config.require("location");
let aksClusterName = config.require("aks-cluster-name");
let appName = config.require("app-name");
let spName = config.require("sp-name");
let projectName = config.require("project-name");
let sshKeyData = config.require("ssh-key-data");
let vmSku = config.require("vm-sku");
let nodeCount = config.requireNumber("node-count"); // read the node count as a number for the agent pool profile
let nodePoolName = config.require("node-pool-name");
let networkPolicy = config.require("network-policy").trim();
let kubernetesVersion = config.require("kubernetes-version");
let dnsPrefix = config.require("dns-prefix");
let username = config.require("username");

const password = new random.RandomPassword("password", {
    length: 16,
    overrideSpecial: "/@\" ",
    special: true,
});

// Create the AD service principal for the K8s cluster.
const adApp = new azuread.Application(appName);
const adSp = new azuread.ServicePrincipal(spName, { applicationId: adApp.applicationId });
const adSpPassword = new azuread.ServicePrincipalPassword("aksSpPassword", {
    servicePrincipalId: adSp.id,
    value: password.result,
    endDate: "2099-01-01T00:00:00Z",
});

// create resource group
let resourceGroup = new azure.core.ResourceGroup(resourceGroupName, {
    name: resourceGroupName,
    location: location,
    tags: {
        environment: environment,
        project: projectName
    }
});

// Now allocate an AKS cluster.
const k8sCluster = new azure.containerservice.KubernetesCluster(aksClusterName, {
    name: aksClusterName,
    resourceGroupName: resourceGroup.name,
    location: location,
    agentPoolProfiles: [{
        name: nodePoolName,
        count: nodeCount,
        vmSize: vmSku,
    }],
    dnsPrefix: dnsPrefix,
    linuxProfile: {
        adminUsername: username,
        sshKey: {
            keyData: sshKeyData,
        },
    },
    networkProfile: {
        networkPlugin: "azure",
        networkPolicy: networkPolicy,
        dnsServiceIp: "10.2.0.10",
        dockerBridgeCidr: "172.17.0.1/16",
        serviceCidr: "10.2.0.0/24"
    },
    servicePrincipal: {
        clientId: adApp.applicationId,
        clientSecret: adSpPassword.value,
    },
    kubernetesVersion: kubernetesVersion,
    // TODO: Determine whether it's beneficial to name this on our
    // own or continue to allow the name to be generated
    // nodeResourceGroup: resourceGroup.name,
    tags: {
        environment: environment,
        project: projectName
    }
});

// Expose a K8s provider instance using our custom cluster instance.
const k8sProvider = new k8s.Provider("aksK8s", {
    kubeconfig: k8sCluster.kubeConfigRaw,
});

exports.k8sProvider = k8sProvider;
exports.k8sCluster = k8sCluster;
exports.aksClusterName = aksClusterName;
11 changes: 11 additions & 0 deletions orchestration/pulumi/azure-k8s/package.json
@@ -0,0 +1,11 @@
{
  "name": "azure-k8s",
  "main": "index.js",
  "dependencies": {
    "@pulumi/azure": "^1.0.0",
    "@pulumi/azuread": "^1.8.0",
    "@pulumi/kubernetes": "^1.0.0",
    "@pulumi/pulumi": "^1.0.0",
    "@pulumi/random": "^1.0.0"
  }
}
