Skip to content
This repository has been archived by the owner on Sep 15, 2021. It is now read-only.

Commit

Permalink
feat: dynamically resolve credentials
Browse files Browse the repository at this point in the history
from configMap or secrets
  • Loading branch information
nicolai86 committed Sep 5, 2017
1 parent da840cf commit 863b85d
Show file tree
Hide file tree
Showing 5 changed files with 128 additions and 42 deletions.
4 changes: 4 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,10 @@ this k8s operator allows you to run a 2.1 couchdb cluster on top of k8s.
- [x] management custom object delete (delete cluster)
- [x] deployment template (port, readiness, liveness)
- [ ] cluster management
- [x] credentials from configMap, secrets
- [x] pod anti affinity
- [x] node selection via labels
- [x] custom labels
- [ ] new pod -> join cluster
- [ ] old pod gone -> leave cluster
- [x] operator definition
Expand Down
18 changes: 18 additions & 0 deletions example.yml
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,8 @@ spec:
couchdbEnv:
- { name: COUCHDB_USER, value: admin }
- { name: COUCHDB_PASSWORD, value: admin }
# - { name: COUCHDB_PASSWORD, valueFrom: { configMapKeyRef: { name: couchdb-config-map, key: password } } }
# - { name: COUCHDB_PASSWORD, valueFrom: { secretKeyRef: { name: couchdb-secret, key: password } } }
---
kind: Service
apiVersion: v1
Expand All @@ -32,3 +34,19 @@ spec:
targetPort: 5984
nodePort: 31984
type: NodePort
---
apiVersion: v1
kind: ConfigMap
metadata:
name: couchdb-config-map
namespace: default
data:
password: admin-configmap
---
apiVersion: v1
kind: Secret
metadata:
name: couchdb-secret
namespace: default
data:
password: YWRtaW4tc2VjcmV0bWFw # admin-secretmap
3 changes: 3 additions & 0 deletions k8s/deployment.yml
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,9 @@ rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch", "create", "update", "delete"]
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get"]
- apiGroups: ["stable.couchdb.org"]
resources: ["couchdbs"]
verbs: ["get", "list", "watch", "update"]
Expand Down
139 changes: 101 additions & 38 deletions main.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
apiv1 "k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
Expand Down Expand Up @@ -125,7 +126,7 @@ func main() {
if c.Labels["app"] != "couchdb" {
return
}
log.Printf("pod %#v creation in cluster %q\n", c.UID, c.Labels["cluster"])
log.Printf("pod %#v creation in cluster %q\n", c.UID, c.Labels["couchdb_cluster"])
// TODO check if too many pods. if so, delete
},
UpdateFunc: func(old interface{}, new interface{}) {
Expand All @@ -136,9 +137,9 @@ func main() {
if c.Labels["app"] != "couchdb" {
return
}
log.Printf("pod %#v (%s) update in cluster %q\n", c.UID, c.Status.Phase, c.Labels["cluster"])
log.Printf("pod %#v (%s) update in cluster %q\n", c.UID, c.Status.Phase, c.Labels["couchdb_cluster"])

res := couchRestClient.Get().Namespace(c.Namespace).Resource("couchdbs").Name(c.Labels["cluster"]).Do()
res := couchRestClient.Get().Namespace(c.Namespace).Resource("couchdbs").Name(c.Labels["couchdb_cluster"]).Do()
var cluster *spec.CouchDB
if o, err := res.Get(); err != nil {
// log.Printf("failed to lookup couchdb: %v", err.Error())
Expand All @@ -147,7 +148,7 @@ func main() {
cluster = o.(*spec.CouchDB)
}

list, err := client.CoreV1().Pods(c.Namespace).List(metav1.ListOptions{LabelSelector: fmt.Sprintf("couchdb_cluster=%s", c.Labels["cluster"])})
list, err := client.CoreV1().Pods(c.Namespace).List(metav1.ListOptions{LabelSelector: fmt.Sprintf("couchdb_cluster=%s", c.Labels["couchdb_cluster"])})
if err != nil {
log.Printf("could nod list couchdb cluster %q pods: %v\n", c.Name, err.Error())
return
Expand Down Expand Up @@ -201,8 +202,12 @@ func main() {

{
setup := list.Items[0]
log.Printf("ready to initialize cluster %q/w", cluster.Name, setup.Status.PodIP)
c, _ := couchdb.New(fmt.Sprintf("http:https://%s:5984", setup.Status.PodIP), &http.Client{}, couchdb.WithBasicAuthentication("admin", "admin"))
username, password, err := credentialsFromEnv(client.CoreV1(), cluster.Namespace, cluster.Spec.Pod.CouchDBEnv)
if err != nil {
log.Printf("failed to resolve credentials: %v\n", err.Error())
}
log.Printf("ready to initialize cluster %q from %q: user %q password %q\n", cluster.Name, setup.Status.PodIP, username, password)
c, _ := couchdb.New(fmt.Sprintf("http:https://%s:5984", setup.Status.PodIP), &http.Client{}, couchdb.WithBasicAuthentication(username, password))
for _, p := range list.Items[1:] {
// if err := c.Cluster.BeginSetup(couchdb.SetupOptions{
// BindAddress: "0.0.0.0",
Expand All @@ -218,8 +223,8 @@ func main() {
// }
if err := c.Cluster.AddNode(couchdb.AddNodeOptions{
Host: p.Status.PodIP,
Username: "admin",
Password: "admin",
Username: username,
Password: password,
Port: 5984,
}); err != nil {
log.Printf("add node for node %s failed: %v\n", p.Status.PodIP, err.Error())
Expand All @@ -242,14 +247,14 @@ func main() {
if c.Labels["app"] != "couchdb" {
return
}
log.Printf("pod %#v deletion in cluster %q\n", c.UID, c.Labels["cluster"])
log.Printf("pod %#v deletion in cluster %q\n", c.UID, c.Labels["couchdb_cluster"])
// TODO check if cluster exists & needs more. if so, spawn
// TODO check if cluster exists. if so, remove node
},
})
go controller.Run(nil)
}
// TODO new controller watching for couchdb server pods

{
source := cache.NewListWatchFromClient(
couchRestClient,
Expand Down Expand Up @@ -291,7 +296,7 @@ func main() {

for i := 0; i < c.Spec.Size-len(list.Items); i++ {
log.Printf("creating pod %d for cluster %q in ns %q\n", i, c.Name, c.Namespace)
pod := newCouchdbPod(c.Name, "admin", c.Spec.Pod)
pod := newCouchdbPod(c.Name, c.Spec.Pod)
_, err = client.CoreV1().Pods(c.Namespace).Create(pod)
if err != nil {
log.Printf("failed to start pod: %#v", err.Error())
Expand Down Expand Up @@ -340,33 +345,27 @@ func main() {

probe.SetReady()

log.Println("running...")
sig := make(chan os.Signal, 1)
signal.Notify(sig, os.Interrupt)
<-sig
}

func couchdbContainer(baseImage, version string) apiv1.Container {
func couchdbContainer(baseImage, version string, env []apiv1.EnvVar) apiv1.Container {
containerEnv := append(env,
apiv1.EnvVar{
Name: "NODENAME",
ValueFrom: &apiv1.EnvVarSource{
FieldRef: &apiv1.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
})

c := apiv1.Container{
Name: "couchdb",
Image: fmt.Sprintf("%s:%s", baseImage, version),
Env: []apiv1.EnvVar{
{
Name: "COUCHDB_USER",
Value: "admin",
},
{
Name: "COUCHDB_PASSWORD",
Value: "admin",
},
{
Name: "NODENAME",
ValueFrom: &apiv1.EnvVarSource{
FieldRef: &apiv1.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
},
},
Env: containerEnv,
Ports: []apiv1.ContainerPort{
{
Name: "node-local",
Expand Down Expand Up @@ -422,17 +421,66 @@ func getMyPodServiceAccount(kubecli kubernetes.Interface) (string, error) {
return sa, err
}

func newCouchdbPod(clustername, password string, spec *spec.PodPolicy) *apiv1.Pod {
c := couchdbContainer(couchdbImage, couchdbVersion)
// spec.AntiAffinity
// valueFromEnvSource resolves the effective value of a single environment
// variable. A literal value takes precedence; otherwise the referenced
// ConfigMap or Secret key is looked up via the Kubernetes API. An empty
// string is returned when neither a literal value nor a supported
// reference is present.
func valueFromEnvSource(core corev1.CoreV1Interface, namespace, value string, valueFrom *apiv1.EnvVarSource) (string, error) {
	if value != "" {
		return value, nil
	}
	// Guard against env vars carrying neither a value nor a source;
	// dereferencing a nil valueFrom below would panic.
	if valueFrom == nil {
		return "", nil
	}
	if mapRef := valueFrom.ConfigMapKeyRef; mapRef != nil {
		config, err := core.ConfigMaps(namespace).Get(mapRef.Name, metav1.GetOptions{})
		if err != nil {
			return "", err
		}
		return config.Data[mapRef.Key], nil
	}
	if secretRef := valueFrom.SecretKeyRef; secretRef != nil {
		secret, err := core.Secrets(namespace).Get(secretRef.Name, metav1.GetOptions{})
		if err != nil {
			return "", err
		}
		// Secret data is stored as raw bytes; convert for the caller.
		return string(secret.Data[secretRef.Key]), nil
	}
	return "", nil
}

// credentialsFromEnv extracts the CouchDB admin username and password from
// the pod's environment variable list, resolving ConfigMap/Secret references
// through the Kubernetes API. Variables that are absent fall back to
// "admin"/"admin".
func credentialsFromEnv(core corev1.CoreV1Interface, namespace string, envs []apiv1.EnvVar) (string, string, error) {
	const fallback = "admin"
	username, password := fallback, fallback
	for _, env := range envs {
		switch env.Name {
		case "COUCHDB_USER":
			resolved, err := valueFromEnvSource(core, namespace, env.Value, env.ValueFrom)
			if err != nil {
				return "", "", err
			}
			username = resolved
		case "COUCHDB_PASSWORD":
			resolved, err := valueFromEnvSource(core, namespace, env.Value, env.ValueFrom)
			if err != nil {
				return "", "", err
			}
			password = resolved
		}
	}
	return username, password, nil
}

func newCouchdbPod(clustername string, spec *spec.PodPolicy) *apiv1.Pod {
c := couchdbContainer(couchdbImage, couchdbVersion, spec.CouchDBEnv)

labels := map[string]string{
"app": "couchdb",
"couchdb_cluster": clustername,
}
mergeLabels(labels, spec.Labels)
pod := &apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "couchdb-",
Labels: map[string]string{
"app": "couchdb",
"couchdb_cluster": clustername,
},
Annotations: map[string]string{},
Labels: labels,
Annotations: map[string]string{},
},
Spec: apiv1.PodSpec{
RestartPolicy: apiv1.RestartPolicyAlways,
Expand All @@ -443,6 +491,7 @@ func newCouchdbPod(clustername, password string, spec *spec.PodPolicy) *apiv1.Po
},
}
if spec.AntiAffinity {
log.Printf("with anti affinty")
selector := &metav1.LabelSelector{MatchLabels: map[string]string{
"couchdb_cluster": clustername,
}}
Expand All @@ -457,5 +506,19 @@ func newCouchdbPod(clustername, password string, spec *spec.PodPolicy) *apiv1.Po
},
}
}
if len(spec.NodeSelector) != 0 {
log.Printf("with node selector")
pod.Spec.NodeSelector = spec.NodeSelector
}
return pod
}

// mergeLabels copies entries from l2 into l1. Keys already present in l1
// are left untouched, so l1's values win on conflict.
func mergeLabels(l1, l2 map[string]string) {
	for key, value := range l2 {
		if _, exists := l1[key]; exists {
			continue
		}
		l1[key] = value
	}
}
6 changes: 2 additions & 4 deletions spec/spec.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,10 +33,8 @@ type PodPolicy struct {
AntiAffinity bool `json:"antiAffinity,omitempty"`

// List of environment variables to set in the couchdb container.
// This is used to configure couchdb process. couchdb cluster cannot be created, when
// bad environement variables are provided. Do not overwrite any flags used to
// bootstrap the cluster (for example `--initial-cluster` flag).
// This field cannot be updated.
// should contain COUCHDB_USER and COUCHDB_PASSWORD. If it doesn't,
// admin/admin will be chosen
CouchDBEnv []apiv1.EnvVar `json:"couchdbEnv,omitempty"`
}

Expand Down

0 comments on commit 863b85d

Please sign in to comment.