diff --git a/README.md b/README.md
index 5b783c3..b1f15a7 100644
--- a/README.md
+++ b/README.md
@@ -13,6 +13,10 @@ this k8s operator allows you to run a 2.1 couchdb cluster on top of k8s.
 - [x] management custom object delete (delete cluster)
 - [x] deployment template (port, readyness, livelyness)
 - [ ] cluster management
+  - [x] credentials from configMap, secrets
+  - [x] pod anti-affinity
+  - [x] node selection via labels
+  - [x] custom labels
 - [ ] new pod -> join cluster
 - [ ] old pod gone -> leave cluster
 - [x] operator definition
diff --git a/example.yml b/example.yml
index 9a6ccdc..4757cbd 100644
--- a/example.yml
+++ b/example.yml
@@ -16,6 +16,8 @@ spec:
   couchdbEnv:
   - { name: COUCHDB_USER, value: admin }
   - { name: COUCHDB_PASSWORD, value: admin }
+  # - { name: COUCHDB_PASSWORD, valueFrom: { configMapKeyRef: { name: couchdb-config-map, key: password } } }
+  # - { name: COUCHDB_PASSWORD, valueFrom: { secretKeyRef: { name: couchdb-secret, key: password } } }
 ---
 kind: Service
 apiVersion: v1
@@ -32,3 +34,19 @@ spec:
     targetPort: 5984
     nodePort: 31984
   type: NodePort
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: couchdb-config-map
+  namespace: default
+data:
+  password: admin-configmap
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: couchdb-secret
+  namespace: default
+data:
+  password: YWRtaW4tc2VjcmV0bWFw # admin-secretmap
diff --git a/k8s/deployment.yml b/k8s/deployment.yml
index 26f8024..9093494 100644
--- a/k8s/deployment.yml
+++ b/k8s/deployment.yml
@@ -47,6 +47,9 @@ rules:
 - apiGroups: [""]
   resources: ["pods"]
   verbs: ["get", "list", "watch", "create", "update", "delete"]
+- apiGroups: [""]
+  resources: ["secrets", "configmaps"]
+  verbs: ["get"]
 - apiGroups: ["stable.couchdb.org"]
   resources: ["couchdbs"]
   verbs: ["get", "list", "watch", "update"]
diff --git a/main.go b/main.go
index 93b3d32..2cd0302 100644
--- a/main.go
+++ b/main.go
@@ -21,6 +21,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/serializer"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/client-go/kubernetes"
+	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
 	apiv1 "k8s.io/client-go/pkg/api/v1"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/cache"
@@ -125,7 +126,7 @@ func main() {
 				if c.Labels["app"] != "couchdb" {
 					return
 				}
-				log.Printf("pod %#v creation in cluster %q\n", c.UID, c.Labels["cluster"])
+				log.Printf("pod %#v creation in cluster %q\n", c.UID, c.Labels["couchdb_cluster"])
 				// TODO check if too many pods. if so, delete
 			},
 			UpdateFunc: func(old interface{}, new interface{}) {
@@ -136,9 +137,9 @@
 				if c.Labels["app"] != "couchdb" {
 					return
 				}
-				log.Printf("pod %#v (%s) update in cluster %q\n", c.UID, c.Status.Phase, c.Labels["cluster"])
+				log.Printf("pod %#v (%s) update in cluster %q\n", c.UID, c.Status.Phase, c.Labels["couchdb_cluster"])
 
-				res := couchRestClient.Get().Namespace(c.Namespace).Resource("couchdbs").Name(c.Labels["cluster"]).Do()
+				res := couchRestClient.Get().Namespace(c.Namespace).Resource("couchdbs").Name(c.Labels["couchdb_cluster"]).Do()
 				var cluster *spec.CouchDB
 				if o, err := res.Get(); err != nil {
 					// log.Printf("failed to lookup couchdb: %v", err.Error())
@@ -147,7 +148,7 @@
 					cluster = o.(*spec.CouchDB)
 				}
 
-				list, err := client.CoreV1().Pods(c.Namespace).List(metav1.ListOptions{LabelSelector: fmt.Sprintf("couchdb_cluster=%s", c.Labels["cluster"])})
+				list, err := client.CoreV1().Pods(c.Namespace).List(metav1.ListOptions{LabelSelector: fmt.Sprintf("couchdb_cluster=%s", c.Labels["couchdb_cluster"])})
 				if err != nil {
 					log.Printf("could nod list couchdb cluster %q pods: %v\n", c.Name, err.Error())
 					return
@@ -201,8 +202,12 @@
 				{
 					setup := list.Items[0]
 
-					log.Printf("ready to initialize cluster %q/w", cluster.Name, setup.Status.PodIP)
-					c, _ := couchdb.New(fmt.Sprintf("http://%s:5984", setup.Status.PodIP), &http.Client{}, couchdb.WithBasicAuthentication("admin", "admin"))
+					username, password, err := credentialsFromEnv(client.CoreV1(), cluster.Namespace, cluster.Spec.Pod.CouchDBEnv)
+					if err != nil {
+						log.Printf("failed to resolve credentials: %v\n", err.Error())
+					}
+					log.Printf("ready to initialize cluster %q from %q: user %q password %q\n", cluster.Name, setup.Status.PodIP, username, password)
+					c, _ := couchdb.New(fmt.Sprintf("http://%s:5984", setup.Status.PodIP), &http.Client{}, couchdb.WithBasicAuthentication(username, password))
 					for _, p := range list.Items[1:] {
 						// if err := c.Cluster.BeginSetup(couchdb.SetupOptions{
 						// 	BindAddress: "0.0.0.0",
@@ -218,8 +223,8 @@
 						// }
 						if err := c.Cluster.AddNode(couchdb.AddNodeOptions{
 							Host:     p.Status.PodIP,
-							Username: "admin",
-							Password: "admin",
+							Username: username,
+							Password: password,
 							Port:     5984,
 						}); err != nil {
 							log.Printf("add node for node %s failed: %v\n", p.Status.PodIP, err.Error())
@@ -242,14 +247,14 @@
 				if c.Labels["app"] != "couchdb" {
 					return
 				}
-				log.Printf("pod %#v deletion in cluster %q\n", c.UID, c.Labels["cluster"])
+				log.Printf("pod %#v deletion in cluster %q\n", c.UID, c.Labels["couchdb_cluster"])
 				// TODO check if cluster exists & needs more. if so, spawn
 				// TODO check if cluster exists. if so, remove node
 			},
 		})
 		go controller.Run(nil)
 	}
-	// TODO new controller watching for couchdb server pods
+
 	{
 		source := cache.NewListWatchFromClient(
 			couchRestClient,
@@ -291,7 +296,7 @@
 
 				for i := 0; i < c.Spec.Size-len(list.Items); i++ {
 					log.Printf("creating pod %d for cluster %q in ns %q\n", i, c.Name, c.Namespace)
-					pod := newCouchdbPod(c.Name, "admin", c.Spec.Pod)
+					pod := newCouchdbPod(c.Name, c.Spec.Pod)
 					_, err = client.CoreV1().Pods(c.Namespace).Create(pod)
 					if err != nil {
 						log.Printf("failed to start pod: %#v", err.Error())
@@ -340,33 +345,27 @@
 
 	probe.SetReady()
 
+	log.Println("running...")
 	sig := make(chan os.Signal, 1)
 	signal.Notify(sig, os.Interrupt)
 	<-sig
 }
 
-func couchdbContainer(baseImage, version string) apiv1.Container {
+func couchdbContainer(baseImage, version string, env []apiv1.EnvVar) apiv1.Container {
+	containerEnv := append(env,
+		apiv1.EnvVar{
+			Name: "NODENAME",
+			ValueFrom: &apiv1.EnvVarSource{
+				FieldRef: &apiv1.ObjectFieldSelector{
+					FieldPath: "status.podIP",
+				},
+			},
+		})
+
 	c := apiv1.Container{
 		Name:  "couchdb",
 		Image: fmt.Sprintf("%s:%s", baseImage, version),
-		Env: []apiv1.EnvVar{
-			{
-				Name:  "COUCHDB_USER",
-				Value: "admin",
-			},
-			{
-				Name:  "COUCHDB_PASSWORD",
-				Value: "admin",
-			},
-			{
-				Name: "NODENAME",
-				ValueFrom: &apiv1.EnvVarSource{
-					FieldRef: &apiv1.ObjectFieldSelector{
-						FieldPath: "status.podIP",
-					},
-				},
-			},
-		},
+		Env: containerEnv,
 		Ports: []apiv1.ContainerPort{
 			{
 				Name:          "node-local",
@@ -422,17 +421,66 @@ func getMyPodServiceAccount(kubecli kubernetes.Interface) (string, error) {
 	return sa, err
 }
 
-func newCouchdbPod(clustername, password string, spec *spec.PodPolicy) *apiv1.Pod {
-	c := couchdbContainer(couchdbImage, couchdbVersion)
-	// spec.AntiAffinity
+func valueFromEnvSource(core corev1.CoreV1Interface, namespace, value string, valueFrom *apiv1.EnvVarSource) (string, error) {
+	if value != "" || valueFrom == nil {
+		return value, nil
+	}
+	if valueFrom.ConfigMapKeyRef != nil {
+		mapRef := valueFrom.ConfigMapKeyRef
+		config, err := core.ConfigMaps(namespace).Get(mapRef.Name, metav1.GetOptions{})
+		if err != nil {
+			return "", err
+		}
+		return config.Data[mapRef.Key], nil
+	}
+
+	if valueFrom.SecretKeyRef != nil {
+		secretRef := valueFrom.SecretKeyRef
+		secret, err := core.Secrets(namespace).Get(secretRef.Name, metav1.GetOptions{})
+		if err != nil {
+			return "", err
+		}
+		return string(secret.Data[secretRef.Key]), nil
+	}
+
+	return "", nil
+}
+
+func credentialsFromEnv(core corev1.CoreV1Interface, namespace string, envs []apiv1.EnvVar) (string, string, error) {
+	adminUsername := "admin"
+	adminPassword := "admin"
+	for _, env := range envs {
+		if env.Name == "COUCHDB_USER" {
+			value, err := valueFromEnvSource(core, namespace, env.Value, env.ValueFrom)
+			if err != nil {
+				return "", "", err
+			}
+			adminUsername = value
+		}
+		if env.Name == "COUCHDB_PASSWORD" {
+			value, err := valueFromEnvSource(core, namespace, env.Value, env.ValueFrom)
+			if err != nil {
+				return "", "", err
+			}
+			adminPassword = value
+		}
+	}
+	return adminUsername, adminPassword, nil
+}
+
+func newCouchdbPod(clustername string, spec *spec.PodPolicy) *apiv1.Pod {
+	c := couchdbContainer(couchdbImage, couchdbVersion, spec.CouchDBEnv)
+
+	labels := map[string]string{
+		"app":             "couchdb",
+		"couchdb_cluster": clustername,
+	}
+	mergeLabels(labels, spec.Labels)
 	pod := &apiv1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: "couchdb-",
-			Labels: map[string]string{
-				"app":             "couchdb",
-				"couchdb_cluster": clustername,
-			},
-			Annotations: map[string]string{},
+			Labels:      labels,
+			Annotations: map[string]string{},
 		},
 		Spec: apiv1.PodSpec{
 			RestartPolicy: apiv1.RestartPolicyAlways,
@@ -443,6 +491,7 @@ func newCouchdbPod(clustername, password string, spec *spec.PodPolicy) *apiv1.Po
 		},
 	}
 	if spec.AntiAffinity {
+		log.Printf("with anti affinity")
 		selector := &metav1.LabelSelector{MatchLabels: map[string]string{
 			"couchdb_cluster": clustername,
 		}}
@@ -457,5 +506,19 @@
 			},
 		}
 	}
+	if len(spec.NodeSelector) != 0 {
+		log.Printf("with node selector")
+		pod.Spec.NodeSelector = spec.NodeSelector
+	}
 	return pod
 }
+
+// mergeLabels merges l2 into l1. Conflicting labels are skipped.
+func mergeLabels(l1, l2 map[string]string) {
+	for k, v := range l2 {
+		if _, ok := l1[k]; ok {
+			continue
+		}
+		l1[k] = v
+	}
+}
diff --git a/spec/spec.go b/spec/spec.go
index 2ba2031..2777cc0 100644
--- a/spec/spec.go
+++ b/spec/spec.go
@@ -33,10 +33,8 @@ type PodPolicy struct {
 
 	AntiAffinity bool `json:"antiAffinity,omitempty"`
 
 	// List of environment variables to set in the couchdb container.
-	// This is used to configure couchdb process. couchdb cluster cannot be created, when
-	// bad environement variables are provided. Do not overwrite any flags used to
-	// bootstrap the cluster (for example `--initial-cluster` flag).
-	// This field cannot be updated.
+	// It should contain COUCHDB_USER and COUCHDB_PASSWORD; if it does not,
+	// admin/admin is used as the default.
 	CouchDBEnv []apiv1.EnvVar `json:"couchdbEnv,omitempty"`
 }
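
For reference, below is a minimal sketch of a CouchDB custom resource that exercises the pod-policy options this change adds. Only the `antiAffinity` and `couchdbEnv` field names are confirmed by `spec/spec.go`; the `apiVersion`/`kind` and the `size`, `pod`, `nodeSelector`, and `labels` field names are assumptions inferred from `main.go` and `k8s/deployment.yml`, so check them against the actual CRD schema before use.

```yaml
# Hypothetical example: a CouchDB cluster with secret-backed credentials,
# pod anti-affinity, node selection, and custom pod labels.
apiVersion: stable.couchdb.org/v1   # assumed; the diff only shows the API group
kind: CouchDB                       # assumed from the spec.CouchDB Go type
metadata:
  name: example-couchdb
  namespace: default
spec:
  size: 3                           # assumed field name for the cluster size
  pod:                              # assumed field name for the PodPolicy block
    antiAffinity: true              # confirmed: spreads pods of one cluster across nodes
    nodeSelector:                   # assumed field name; maps to PodPolicy.NodeSelector
      disktype: ssd
    labels:                         # assumed field name; merged into the generated pod labels
      team: storage
    couchdbEnv:                     # confirmed: resolved by credentialsFromEnv
    - { name: COUCHDB_USER, value: admin }
    - { name: COUCHDB_PASSWORD, valueFrom: { secretKeyRef: { name: couchdb-secret, key: password } } }
```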