From 06c0d324bf93a037010186fe54e40590ea39d92c Mon Sep 17 00:00:00 2001 From: Jesse Suen Date: Wed, 7 Mar 2018 04:19:03 -0800 Subject: [PATCH] Rewrite the installer such that manifests are maintainable --- Dockerfile-builder | 18 +- Gopkg.lock | 40 +- Gopkg.toml | 2 + Makefile | 2 +- cmd/argo/commands/install.go | 571 +----------------- deploy/artifact-secret.yaml | 9 - deploy/controller-config.yaml | 18 - deploy/controller-deployment.yaml | 18 - install/install.go | 359 +++++++++++ install/manifests/01_workflow-crd.yaml | 13 + .../manifests/02a_workflow-controller-sa.yaml | 5 + .../02b_workflow-controller-cluster-role.yaml | 42 ++ ...rkflow-controller-cluster-rolebinding.yaml | 12 + .../02d_workflow-controller-configmap.yaml | 9 + .../02e_workflow-controller-deployment.yaml | 29 + install/manifests/03a_argo-ui-sa.yaml | 5 + .../manifests/03b_argo-ui-cluster-role.yaml | 29 + .../03c_argo-ui-cluster-rolebinding.yaml | 12 + install/manifests/03d_argo-ui-deployment.yaml | 30 + install/manifests/03e_argo-ui-service.yaml | 11 + 20 files changed, 615 insertions(+), 619 deletions(-) delete mode 100644 deploy/artifact-secret.yaml delete mode 100644 deploy/controller-config.yaml delete mode 100644 deploy/controller-deployment.yaml create mode 100644 install/install.go create mode 100644 install/manifests/01_workflow-crd.yaml create mode 100644 install/manifests/02a_workflow-controller-sa.yaml create mode 100644 install/manifests/02b_workflow-controller-cluster-role.yaml create mode 100644 install/manifests/02c_workflow-controller-cluster-rolebinding.yaml create mode 100644 install/manifests/02d_workflow-controller-configmap.yaml create mode 100644 install/manifests/02e_workflow-controller-deployment.yaml create mode 100644 install/manifests/03a_argo-ui-sa.yaml create mode 100644 install/manifests/03b_argo-ui-cluster-role.yaml create mode 100644 install/manifests/03c_argo-ui-cluster-rolebinding.yaml create mode 100644 install/manifests/03d_argo-ui-deployment.yaml create mode 100644 install/manifests/03e_argo-ui-service.yaml diff --git a/Dockerfile-builder b/Dockerfile-builder index 194b94423b46..6650e03fabc3 100644 --- a/Dockerfile-builder +++ b/Dockerfile-builder @@ -8,23 +8,21 @@ RUN apt-get update && apt-get install -y \ rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # Install go -ENV GO_VERSION 1.9.1 +ENV GO_VERSION 1.9.3 ENV GO_ARCH amd64 ENV GOPATH /root/go ENV PATH ${GOPATH}/bin:/usr/local/go/bin:${PATH} RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ - # A dummy directory is created under $GOPATH/src/dummy so we are able to use dep - # to install all the packages of our dep lock file - mkdir -p ${GOPATH}/src/dummy + wget https://github.com/golang/dep/releases/download/v0.4.1/dep-linux-amd64 -O /usr/local/bin/dep && \ + chmod +x /usr/local/bin/dep -# Install Go dependencies and some tooling -COPY Gopkg.toml ${GOPATH}/src/dummy -COPY Gopkg.lock ${GOPATH}/src/dummy -RUN go get -u github.com/golang/dep/cmd/dep && \ - rm -rf ${GOPATH}/src/github.com && \ - cd ${GOPATH}/src/dummy && \ +# A dummy directory is created under $GOPATH/src/dummy so we are able to use dep +# to install all the packages of our dep lock file +COPY Gopkg.toml ${GOPATH}/src/dummy/Gopkg.toml +COPY Gopkg.lock ${GOPATH}/src/dummy/Gopkg.lock +RUN cd ${GOPATH}/src/dummy && \ dep ensure -vendor-only && \ mv vendor/* ${GOPATH}/src/ && \ rmdir vendor && \ diff --git 
a/Gopkg.lock b/Gopkg.lock index 87b42762c455..c02acf33a1c0 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -19,6 +19,15 @@ packages = ["."] revision = "de5bf2ad457846296e2031421a34e2568e304e35" +[[projects]] + branch = "master" + name = "github.com/argoproj/argo-cd" + packages = [ + "util/diff", + "util/kube" + ] + revision = "405b47ffe62cff5031748f39c1f96551921b26f5" + [[projects]] name = "github.com/davecgh/go-spew" packages = ["spew"] @@ -266,6 +275,12 @@ revision = "792786c7400a136282c1664665ae0a8db921c6c2" version = "v1.0.0" +[[projects]] + name = "github.com/sergi/go-diff" + packages = ["diffmatchpatch"] + revision = "1744e2970ca51c86172c8190fadad617561ed6e7" + version = "v1.0.0" + [[projects]] name = "github.com/sirupsen/logrus" packages = ["."] @@ -325,6 +340,21 @@ packages = ["."] revision = "dcecefd839c4193db0d35b88ec65b4c12d360ab0" +[[projects]] + name = "github.com/yudai/gojsondiff" + packages = [ + ".", + "formatter" + ] + revision = "7b1b7adf999dab73a6eb02669c3d82dbb27a3dd6" + version = "1.0.0" + +[[projects]] + branch = "master" + name = "github.com/yudai/golcs" + packages = ["."] + revision = "ecda9a501e8220fae3b4b600c3db4b0ba22cfc68" + [[projects]] branch = "master" name = "golang.org/x/crypto" @@ -356,6 +386,12 @@ ] revision = "30785a2c434e431ef7c507b54617d6a951d5f2b4" +[[projects]] + branch = "master" + name = "golang.org/x/sync" + packages = ["errgroup"] + revision = "fd80eb99c8f653c847d294a001bdf2a3a6f768f5" + [[projects]] branch = "master" name = "golang.org/x/sys" @@ -476,6 +512,7 @@ branch = "release-1.9" name = "k8s.io/apimachinery" packages = [ + "pkg/api/equality", "pkg/api/errors", "pkg/api/meta", "pkg/api/resource", @@ -527,6 +564,7 @@ packages = [ "discovery", "discovery/fake", + "dynamic", "kubernetes", "kubernetes/fake", "kubernetes/scheme", @@ -659,6 +697,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "f9b956f3b3fb37bf455023ae90d99bfeb97b5e481db97876ff43e91dfec0829c" + inputs-digest = "0518996e667ff6aaf9dc0d71f9ee3fca5a5094a69018dd74653db8d1b313a738" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 45c944033b31..39767560744f 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -3,6 +3,8 @@ required = [ "k8s.io/code-generator/cmd/deepcopy-gen", "k8s.io/code-generator/cmd/informer-gen", "k8s.io/code-generator/cmd/lister-gen", + # required by packr build (which we don't import as a package) + "golang.org/x/sync/errgroup", ] [[constraint]] diff --git a/Makefile b/Makefile index b64fe6988250..b4a583855582 100644 --- a/Makefile +++ b/Makefile @@ -58,7 +58,7 @@ builder: .PHONY: cli cli: - go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${ARGO_CLI_NAME} ./cmd/argo + CGO_ENABLED=0 go run vendor/github.com/gobuffalo/packr/packr/main.go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${ARGO_CLI_NAME} ./cmd/argo .PHONY: cli-linux cli-linux: builder diff --git a/cmd/argo/commands/install.go b/cmd/argo/commands/install.go index e5c8e3dbc58b..cb0c1b5d0944 100644 --- a/cmd/argo/commands/install.go +++ b/cmd/argo/commands/install.go @@ -1,31 +1,10 @@ package commands import ( - "fmt" - "reflect" - "strconv" - "time" - - "github.com/argoproj/argo" - "github.com/argoproj/argo/errors" - "github.com/argoproj/argo/pkg/apis/workflow" - wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo/install" "github.com/argoproj/argo/workflow/common" - "github.com/argoproj/argo/workflow/controller" - "github.com/ghodss/yaml" - goversion "github.com/hashicorp/go-version" log 
"github.com/sirupsen/logrus" "github.com/spf13/cobra" - appsv1beta2 "k8s.io/api/apps/v1beta2" - apiv1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - apierr "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" ) var ( @@ -40,33 +19,20 @@ var ( DefaultUiImage = imageNamespace + "/argoui:" + imageTag ) -// InstallFlags has all the required parameters for installing Argo. -type InstallFlags struct { - Upgrade bool // --upgrade - DryRun bool // --dry-run - Namespace string // --install-namespace - InstanceID string // --instanceid - ConfigMap string // --configmap - ControllerName string // --controller-name - ControllerImage string // --controller-image - ServiceAccount string // --service-account - ExecutorImage string // --executor-image - UIName string // --ui-name - UIImage string // --ui-image - UIBaseHref string // --ui-base-href - UIServiceAccount string // --ui-service-account - EnableWebConsole bool // --enable-web-console -} - func NewInstallCommand() *cobra.Command { var ( - installArgs InstallFlags + installArgs install.InstallOptions ) var command = &cobra.Command{ Use: "install", Short: "install Argo", Run: func(cmd *cobra.Command, args []string) { - Install(installArgs) + _ = initKubeClient() + installer, err := install.NewInstaller(restConfig, installArgs) + if err != nil { + log.Fatal(err) + } + installer.Install() }, } command.Flags().BoolVar(&installArgs.Upgrade, "upgrade", false, "upgrade controller/ui deployments and configmap if already installed") @@ -74,532 +40,13 @@ func NewInstallCommand() *cobra.Command { command.Flags().StringVar(&installArgs.Namespace, "install-namespace", common.DefaultControllerNamespace, "install into a specific Namespace") command.Flags().StringVar(&installArgs.InstanceID, "instanceid", "", "optional instance id to use for the controller (for multi-controller environments)") command.Flags().StringVar(&installArgs.ConfigMap, "configmap", common.DefaultConfigMapName(common.DefaultControllerDeploymentName), "install controller using preconfigured configmap") - command.Flags().StringVar(&installArgs.ControllerName, "controller-name", common.DefaultControllerDeploymentName, "name of controller deployment") command.Flags().StringVar(&installArgs.ControllerImage, "controller-image", DefaultControllerImage, "use a specified controller image") command.Flags().StringVar(&installArgs.ServiceAccount, "service-account", "", "use a specified service account for the workflow-controller deployment") command.Flags().StringVar(&installArgs.ExecutorImage, "executor-image", DefaultExecutorImage, "use a specified executor image") - command.Flags().StringVar(&installArgs.UIName, "ui-name", ArgoUIDeploymentName, "name of ui deployment") command.Flags().StringVar(&installArgs.UIImage, "ui-image", DefaultUiImage, "use a specified ui image") command.Flags().StringVar(&installArgs.UIBaseHref, "ui-base-href", "/", "UI base url") command.Flags().StringVar(&installArgs.UIServiceAccount, "ui-service-account", "", "use a specified service account for the argo-ui deployment") command.Flags().BoolVar(&installArgs.EnableWebConsole, "enable-web-console", false, "allows exec access into running step container using Argo UI") + 
command.Flags().StringVar(&installArgs.ImagePullPolicy, "image-pull-policy", "", "imagePullPolicy to use for deployments") return command } - -func printYAML(obj interface{}) { - objBytes, err := yaml.Marshal(obj) - if err != nil { - log.Fatalf("Failed to marshal %v", obj) - } - fmt.Printf("---\n%s\n", string(objBytes)) -} - -// Install installs the Argo controller and UI in the given Namespace -func Install(args InstallFlags) { - clientset = initKubeClient() - if !args.DryRun { - fmt.Printf("Installing Argo %s into namespace '%s'\n", argo.GetVersion(), args.Namespace) - kubernetesVersionCheck(clientset) - } - installCRD(clientset, args) - if args.ServiceAccount == "" && clusterAdminExists(clientset) { - createServiceAccount(clientset, ArgoControllerServiceAccount, args) - createClusterRole(clientset, ArgoControllerClusterRole, ArgoControllerPolicyRules, args) - createClusterRoleBinding(clientset, ArgoControllerClusterRoleBinding, ArgoControllerServiceAccount, ArgoControllerClusterRole, args) - args.ServiceAccount = ArgoControllerServiceAccount - } - if args.UIServiceAccount == "" && clusterAdminExists(clientset) { - createServiceAccount(clientset, ArgoUIServiceAccount, args) - createClusterRole(clientset, ArgoUIClusterRole, ArgoUIPolicyRules, args) - createClusterRoleBinding(clientset, ArgoUIClusterRoleBinding, ArgoUIServiceAccount, ArgoUIClusterRole, args) - args.UIServiceAccount = ArgoUIServiceAccount - } - installConfigMap(clientset, args) - installController(clientset, args) - installUI(clientset, args) - installUIService(clientset, args) -} - -func clusterAdminExists(clientset *kubernetes.Clientset) bool { - // TODO: change this method to check if RBAC is enabled - clusterRoles := clientset.RbacV1().ClusterRoles() - _, err := clusterRoles.Get("cluster-admin", metav1.GetOptions{}) - if err != nil { - if apierr.IsNotFound(err) { - return false - } - log.Fatalf("Failed to lookup 'cluster-admin' role: %v", err) - } - return true -} - -func createServiceAccount(clientset *kubernetes.Clientset, serviceAccountName string, args InstallFlags) { - serviceAccount := apiv1.ServiceAccount{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "ServiceAccount", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: serviceAccountName, - Namespace: args.Namespace, - }, - } - if args.DryRun { - printYAML(serviceAccount) - return - } - _, err := clientset.CoreV1().ServiceAccounts(args.Namespace).Create(&serviceAccount) - if err != nil { - if !apierr.IsAlreadyExists(err) { - log.Fatalf("Failed to create service account '%s': %v\n", serviceAccountName, err) - } - fmt.Printf("ServiceAccount '%s' already exists\n", serviceAccountName) - return - } - fmt.Printf("ServiceAccount '%s' created\n", serviceAccountName) -} - -func createClusterRole(clientset *kubernetes.Clientset, clusterRoleName string, rules []rbacv1.PolicyRule, args InstallFlags) { - clusterRole := rbacv1.ClusterRole{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "rbac.authorization.k8s.io/v1", - Kind: "ClusterRole", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: clusterRoleName, - }, - Rules: rules, - } - if args.DryRun { - printYAML(clusterRole) - return - } - crclient := clientset.RbacV1().ClusterRoles() - _, err := crclient.Create(&clusterRole) - if err != nil { - if !apierr.IsAlreadyExists(err) { - log.Fatalf("Failed to create ClusterRole '%s': %v\n", clusterRoleName, err) - } - existingClusterRole, err := crclient.Get(clusterRoleName, metav1.GetOptions{}) - if err != nil { - log.Fatalf("Failed to get ClusterRole '%s': %v\n", clusterRoleName, err) 
- } - if !reflect.DeepEqual(existingClusterRole.Rules, clusterRole.Rules) { - if !args.Upgrade { - log.Fatalf("ClusterRole '%s' requires upgrade. Rerun with --upgrade to update the configuration", clusterRoleName) - } - _, err = crclient.Update(&clusterRole) - if err != nil { - log.Fatalf("Failed to update ClusterRole '%s': %v\n", clusterRoleName, err) - } - fmt.Printf("ClusterRole '%s' updated\n", clusterRoleName) - } else { - fmt.Printf("Existing ClusterRole '%s' up-to-date\n", clusterRoleName) - } - } else { - fmt.Printf("ClusterRole '%s' created\n", clusterRoleName) - } -} - -func createClusterRoleBinding(clientset *kubernetes.Clientset, clusterBindingRoleName, serviceAccountName, clusterRoleName string, args InstallFlags) { - roleBinding := rbacv1.ClusterRoleBinding{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "rbac.authorization.k8s.io/v1", - Kind: "ClusterRoleBinding", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: clusterBindingRoleName, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "ClusterRole", - Name: clusterRoleName, - }, - Subjects: []rbacv1.Subject{ - { - Kind: rbacv1.ServiceAccountKind, - Name: serviceAccountName, - Namespace: args.Namespace, - }, - }, - } - if args.DryRun { - printYAML(roleBinding) - return - } - _, err := clientset.RbacV1().ClusterRoleBindings().Create(&roleBinding) - if err != nil { - if !apierr.IsAlreadyExists(err) { - log.Fatalf("Failed to create ClusterRoleBinding %s: %v\n", clusterBindingRoleName, err) - } - fmt.Printf("ClusterRoleBinding '%s' already exists\n", clusterBindingRoleName) - return - } - fmt.Printf("ClusterRoleBinding '%s' created, bound '%s' to '%s'\n", clusterBindingRoleName, serviceAccountName, clusterRoleName) -} - -func kubernetesVersionCheck(clientset *kubernetes.Clientset) { - // Check if the Kubernetes version is >= 1.8 - versionInfo, err := clientset.ServerVersion() - if err != nil { - log.Fatalf("Failed to get Kubernetes version: %v", err) - } - - serverVersion, err := goversion.NewVersion(versionInfo.String()) - if err != nil { - log.Fatalf("Failed to create version: %v", err) - } - - minVersion, err := goversion.NewVersion("1.8") - if err != nil { - log.Fatalf("Failed to create minimum version: %v", err) - } - - if serverVersion.LessThan(minVersion) { - log.Fatalf("Server version %v < %v. Installation won't proceed...\n", serverVersion, minVersion) - } - - fmt.Printf("Proceeding with Kubernetes version %v\n", serverVersion) -} - -func installConfigMap(clientset *kubernetes.Clientset, args InstallFlags) { - cmClient := clientset.CoreV1().ConfigMaps(args.Namespace) - wfConfig := controller.WorkflowControllerConfig{ - ExecutorImage: args.ExecutorImage, - InstanceID: args.InstanceID, - } - configBytes, err := yaml.Marshal(wfConfig) - if err != nil { - log.Fatalf("%+v", errors.InternalWrapError(err)) - } - wfConfigMap := apiv1.ConfigMap{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "ConfigMap", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: args.ConfigMap, - Namespace: args.Namespace, - }, - Data: map[string]string{ - common.WorkflowControllerConfigMapKey: string(configBytes), - }, - } - if args.DryRun { - printYAML(wfConfigMap) - return - } - _, err = cmClient.Create(&wfConfigMap) - if err != nil { - if !apierr.IsAlreadyExists(err) { - log.Fatalf("Failed to create ConfigMap '%s' in namespace '%s': %v", args.ConfigMap, args.Namespace, err) - } - // Configmap already exists. 
Check if existing configmap needs an update to a new executor image - existingCM, err := cmClient.Get(args.ConfigMap, metav1.GetOptions{}) - if err != nil { - log.Fatalf("Failed to retrieve ConfigMap '%s' in namespace '%s': %v", args.ConfigMap, args.Namespace, err) - } - configStr, ok := existingCM.Data[common.WorkflowControllerConfigMapKey] - if !ok { - log.Fatalf("ConfigMap '%s' missing key '%s'", args.ConfigMap, common.WorkflowControllerConfigMapKey) - } - var existingConfig controller.WorkflowControllerConfig - err = yaml.Unmarshal([]byte(configStr), &existingConfig) - if err != nil { - log.Fatalf("Failed to load controller configuration: %v", err) - } - if existingConfig.ExecutorImage == wfConfig.ExecutorImage { - fmt.Printf("Existing ConfigMap '%s' up-to-date\n", args.ConfigMap) - return - } - if !args.Upgrade { - log.Fatalf("ConfigMap '%s' requires upgrade. Rerun with --upgrade to update the configuration", args.ConfigMap) - } - existingConfig.ExecutorImage = args.ExecutorImage - configBytes, err := yaml.Marshal(existingConfig) - if err != nil { - log.Fatalf("%+v", errors.InternalWrapError(err)) - } - existingCM.Data = map[string]string{ - common.WorkflowControllerConfigMapKey: string(configBytes), - } - _, err = cmClient.Update(existingCM) - if err != nil { - log.Fatalf("Failed to update ConfigMap '%s' in namespace '%s': %v", args.ConfigMap, args.Namespace, err) - } - fmt.Printf("ConfigMap '%s' updated\n", args.ConfigMap) - } -} - -func installController(clientset *kubernetes.Clientset, args InstallFlags) { - controllerDeployment := appsv1beta2.Deployment{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "apps/v1beta2", - Kind: "Deployment", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: args.ControllerName, - Namespace: args.Namespace, - }, - Spec: appsv1beta2.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app": args.ControllerName, - }, - }, - Template: apiv1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "app": args.ControllerName, - }, - }, - Spec: apiv1.PodSpec{ - ServiceAccountName: args.ServiceAccount, - Containers: []apiv1.Container{ - { - Name: args.ControllerName, - Image: args.ControllerImage, - Command: []string{"workflow-controller"}, - Args: []string{"--configmap", args.ConfigMap}, - Env: []apiv1.EnvVar{ - { - Name: common.EnvVarNamespace, - ValueFrom: &apiv1.EnvVarSource{ - FieldRef: &apiv1.ObjectFieldSelector{ - APIVersion: "v1", - FieldPath: "metadata.namespace", - }, - }, - }, - }, - }, - }, - }, - }, - }, - } - createDeploymentHelper(&controllerDeployment, args) -} - -func installUI(clientset *kubernetes.Clientset, args InstallFlags) { - uiDeployment := appsv1beta2.Deployment{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "apps/v1beta2", - Kind: "Deployment", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: args.UIName, - Namespace: args.Namespace, - }, - Spec: appsv1beta2.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app": args.UIName, - }, - }, - Template: apiv1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "app": args.UIName, - }, - }, - Spec: apiv1.PodSpec{ - ServiceAccountName: args.UIServiceAccount, - Containers: []apiv1.Container{ - { - Name: args.UIName, - Image: args.UIImage, - Env: []apiv1.EnvVar{ - { - Name: common.EnvVarNamespace, - ValueFrom: &apiv1.EnvVarSource{ - FieldRef: &apiv1.ObjectFieldSelector{ - APIVersion: "v1", - FieldPath: "metadata.namespace", - }, - }, - }, - { - Name: "IN_CLUSTER", - Value: "true", 
- }, - { - Name: "ENABLE_WEB_CONSOLE", - Value: strconv.FormatBool(args.EnableWebConsole), - }, - { - Name: "BASE_HREF", - Value: args.UIBaseHref, - }, - }, - }, - }, - }, - }, - }, - } - createDeploymentHelper(&uiDeployment, args) -} - -// createDeploymentHelper is helper to create or update an existing deployment (if --upgrade was supplied) -func createDeploymentHelper(deployment *appsv1beta2.Deployment, args InstallFlags) { - depClient := clientset.AppsV1beta2().Deployments(args.Namespace) - var result *appsv1beta2.Deployment - var err error - if args.DryRun { - printYAML(deployment) - return - } - result, err = depClient.Create(deployment) - if err != nil { - if !apierr.IsAlreadyExists(err) { - log.Fatal(err) - } - // deployment already exists - existing, err := depClient.Get(deployment.ObjectMeta.Name, metav1.GetOptions{}) - if err != nil { - log.Fatalf("Failed to get existing deployment: %v", err) - } - if upgradeNeeded(deployment, existing) { - if !args.Upgrade { - log.Fatalf("Deployment '%s' requires upgrade. Rerun with --upgrade to upgrade the deployment", deployment.ObjectMeta.Name) - } - existing, err = depClient.Update(deployment) - if err != nil { - log.Fatalf("Failed to update deployment: %v", err) - } - fmt.Printf("Existing deployment '%s' updated\n", existing.GetObjectMeta().GetName()) - } else { - fmt.Printf("Existing deployment '%s' up-to-date\n", existing.GetObjectMeta().GetName()) - } - } else { - fmt.Printf("Deployment '%s' created\n", result.GetObjectMeta().GetName()) - } -} - -// upgradeNeeded checks two deployments and returns whether or not there are obvious -// differences in a few deployment/container spec fields that would warrant an -// upgrade. WARNING: This is not intended to be comprehensive -- its primary purpose -// is to check if the controller/UI image is out of date with this version of argo. 
-func upgradeNeeded(dep1, dep2 *appsv1beta2.Deployment) bool { - if len(dep1.Spec.Template.Spec.Containers) != len(dep2.Spec.Template.Spec.Containers) { - return true - } - for i := 0; i < len(dep1.Spec.Template.Spec.Containers); i++ { - ctr1 := dep1.Spec.Template.Spec.Containers[i] - ctr2 := dep2.Spec.Template.Spec.Containers[i] - if ctr1.Name != ctr2.Name { - return true - } - if ctr1.Image != ctr2.Image { - return true - } - if !reflect.DeepEqual(ctr1.Env, ctr2.Env) { - return true - } - if !reflect.DeepEqual(ctr1.Command, ctr2.Command) { - return true - } - if !reflect.DeepEqual(ctr1.Args, ctr2.Args) { - return true - } - } - return false -} - -func installUIService(clientset *kubernetes.Clientset, args InstallFlags) { - svcName := ArgoUIServiceName - svcClient := clientset.CoreV1().Services(args.Namespace) - uiSvc := apiv1.Service{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "Service", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: svcName, - Namespace: args.Namespace, - }, - Spec: apiv1.ServiceSpec{ - Ports: []apiv1.ServicePort{ - { - Port: 80, - TargetPort: intstr.FromInt(8001), - }, - }, - Selector: map[string]string{ - "app": args.UIName, - }, - }, - } - if args.DryRun { - printYAML(uiSvc) - return - } - _, err := svcClient.Create(&uiSvc) - if err != nil { - if !apierr.IsAlreadyExists(err) { - log.Fatal(err) - } - fmt.Printf("Service '%s' already exists\n", svcName) - } else { - fmt.Printf("Service '%s' created\n", svcName) - } -} - -func installCRD(clientset *kubernetes.Clientset, args InstallFlags) { - workflowCRD := apiextensionsv1beta1.CustomResourceDefinition{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "apiextensions.k8s.io/v1beta1", - Kind: "CustomResourceDefinition", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: workflow.FullName, - }, - Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ - Group: workflow.Group, - Version: wfv1.SchemeGroupVersion.Version, - Scope: apiextensionsv1beta1.NamespaceScoped, - Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ - Plural: workflow.Plural, - Kind: workflow.Kind, - ShortNames: []string{workflow.ShortName}, - }, - }, - } - if args.DryRun { - printYAML(workflowCRD) - return - } - apiextensionsclientset := apiextensionsclient.NewForConfigOrDie(restConfig) - _, err := apiextensionsclientset.ApiextensionsV1beta1().CustomResourceDefinitions().Create(&workflowCRD) - if err != nil { - if !apierr.IsAlreadyExists(err) { - log.Fatalf("Failed to create CustomResourceDefinition: %v", err) - } - fmt.Printf("CustomResourceDefinition '%s' already exists\n", workflow.FullName) - } - // wait for CRD being established - var crd *apiextensionsv1beta1.CustomResourceDefinition - err = wait.Poll(500*time.Millisecond, 60*time.Second, func() (bool, error) { - crd, err = apiextensionsclientset.ApiextensionsV1beta1().CustomResourceDefinitions().Get(workflow.FullName, metav1.GetOptions{}) - if err != nil { - return false, err - } - for _, cond := range crd.Status.Conditions { - switch cond.Type { - case apiextensionsv1beta1.Established: - if cond.Status == apiextensionsv1beta1.ConditionTrue { - return true, err - } - case apiextensionsv1beta1.NamesAccepted: - if cond.Status == apiextensionsv1beta1.ConditionFalse { - log.Errorf("Name conflict: %v", cond.Reason) - } - } - } - return false, err - }) - if err != nil { - log.Fatalf("Failed to wait for CustomResourceDefinition: %v", err) - } -} diff --git a/deploy/artifact-secret.yaml b/deploy/artifact-secret.yaml deleted file mode 100644 index c2d9cc1f38df..000000000000 --- 
a/deploy/artifact-secret.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# This creates a secret with s3 bucket credentials matching the -# default minio credentials during `helm install stable/minio` -apiVersion: v1 -kind: Secret -metadata: - name: default-minio-credentials -data: - accessKey: QUtJQUlPU0ZPRE5ON0VYQU1QTEU= - secretKey: d0phbHJYVXRuRkVNSS9LN01ERU5HL2JQeFJmaUNZRVhBTVBMRUtFWQ== diff --git a/deploy/controller-config.yaml b/deploy/controller-config.yaml deleted file mode 100644 index 6fa16f81d3a1..000000000000 --- a/deploy/controller-config.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: workflow-controller-configmap -data: - config: | - executorImage: argoproj/argoexec:latest - artifactRepository: - s3: - bucket: my-bucket - endpoint: argo-artifacts-minio-svc:9000 - insecure: true - accessKeySecret: - name: argo-artifacts-minio-user - key: accesskey - secretKeySecret: - name: argo-artifacts-minio-user - key: secretkey diff --git a/deploy/controller-deployment.yaml b/deploy/controller-deployment.yaml deleted file mode 100644 index def0659df63e..000000000000 --- a/deploy/controller-deployment.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - name: workflow-controller-deployment -spec: - selector: - matchLabels: - app: workflow-controller - template: - metadata: - labels: - app: workflow-controller - spec: - containers: - - name: workflow-controller - image: argoproj/workflow-controller:latest - command: [/bin/workflow-controller] - args: [--configmap, workflow-controller-configmap] diff --git a/install/install.go b/install/install.go new file mode 100644 index 000000000000..69eb5d47ea52 --- /dev/null +++ b/install/install.go @@ -0,0 +1,359 @@ +package install + +import ( + "fmt" + "strconv" + + "github.com/argoproj/argo" + "github.com/argoproj/argo-cd/util/diff" + "github.com/argoproj/argo-cd/util/kube" + "github.com/argoproj/argo/errors" + "github.com/argoproj/argo/workflow/common" + "github.com/argoproj/argo/workflow/controller" + "github.com/ghodss/yaml" + "github.com/gobuffalo/packr" + goversion "github.com/hashicorp/go-version" + log "github.com/sirupsen/logrus" + "github.com/yudai/gojsondiff/formatter" + appsv1beta2 "k8s.io/api/apps/v1beta2" + apiv1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apierr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +type InstallOptions struct { + Upgrade bool // --upgrade + DryRun bool // --dry-run + Namespace string // --install-namespace + InstanceID string // --instanceid + ConfigMap string // --configmap + ControllerImage string // --controller-image + ServiceAccount string // --service-account + ExecutorImage string // --executor-image + UIImage string // --ui-image + UIBaseHref string // --ui-base-href + UIServiceAccount string // --ui-service-account + EnableWebConsole bool // --enable-web-console + ImagePullPolicy string // --image-pull-policy +} + +type Installer struct { + InstallOptions + box packr.Box + config *rest.Config + dynClientPool dynamic.ClientPool + disco discovery.DiscoveryInterface + rbacSupported *bool + clientset *kubernetes.Clientset +} + +func NewInstaller(config *rest.Config, opts InstallOptions) (*Installer, error) { + shallowCopy := 
*config + inst := Installer{ + InstallOptions: opts, + box: packr.NewBox("./manifests"), + config: &shallowCopy, + } + var err error + inst.dynClientPool = dynamic.NewDynamicClientPool(inst.config) + inst.disco, err = discovery.NewDiscoveryClientForConfig(inst.config) + if err != nil { + return nil, err + } + inst.clientset, err = kubernetes.NewForConfig(config) + if err != nil { + return nil, err + } + return &inst, nil +} + +// Install installs the Argo controller and UI in the given Namespace +func (i *Installer) Install() { + if !i.DryRun { + fmt.Printf("Installing Argo %s into namespace '%s'\n", argo.GetVersion(), i.Namespace) + kubernetesVersionCheck(i.clientset) + } + i.InstallWorkflowCRD() + i.InstallWorkflowController() + i.InstallArgoUI() +} + +func kubernetesVersionCheck(clientset *kubernetes.Clientset) { + // Check if the Kubernetes version is >= 1.8 + versionInfo, err := clientset.ServerVersion() + if err != nil { + log.Fatalf("Failed to get Kubernetes version: %v", err) + } + + serverVersion, err := goversion.NewVersion(versionInfo.String()) + if err != nil { + log.Fatalf("Failed to create version: %v", err) + } + + minVersion, err := goversion.NewVersion("1.8") + if err != nil { + log.Fatalf("Failed to create minimum version: %v", err) + } + + if serverVersion.LessThan(minVersion) { + log.Fatalf("Server version %v < %v. Installation won't proceed...\n", serverVersion, minVersion) + } + + fmt.Printf("Proceeding with Kubernetes version %v\n", serverVersion) +} + +// IsRBACSupported returns whether or not RBAC is supported on the cluster +func (i *Installer) IsRBACSupported() bool { + if i.rbacSupported != nil { + return *i.rbacSupported + } + // TODO: figure out the proper way to test if RBAC is enabled + clusterRoles := i.clientset.RbacV1().ClusterRoles() + _, err := clusterRoles.Get("cluster-admin", metav1.GetOptions{}) + if err != nil { + if apierr.IsNotFound(err) { + f := false + i.rbacSupported = &f + return false + } + log.Fatalf("Failed to lookup 'cluster-admin' role: %v", err) + } + t := true + i.rbacSupported = &t + return true + +} + +func (i *Installer) InstallWorkflowCRD() { + var workflowCRD apiextensionsv1beta1.CustomResourceDefinition + i.unmarshalManifest("01_workflow-crd.yaml", &workflowCRD) + obj := kube.MustToUnstructured(&workflowCRD) + i.MustInstallResource(obj) +} + +func (i *Installer) InstallWorkflowController() { + var workflowControllerServiceAccount apiv1.ServiceAccount + var workflowControllerClusterRole rbacv1.ClusterRole + var workflowControllerClusterRoleBinding rbacv1.ClusterRoleBinding + //var workflowControllerConfigMap apiv1.ConfigMap + var workflowControllerDeployment appsv1beta2.Deployment + i.unmarshalManifest("02a_workflow-controller-sa.yaml", &workflowControllerServiceAccount) + i.unmarshalManifest("02b_workflow-controller-cluster-role.yaml", &workflowControllerClusterRole) + i.unmarshalManifest("02c_workflow-controller-cluster-rolebinding.yaml", &workflowControllerClusterRoleBinding) + //i.unmarshalManifest("02d_workflow-controller-configmap.yaml", &workflowControllerConfigMap) + i.unmarshalManifest("02e_workflow-controller-deployment.yaml", &workflowControllerDeployment) + workflowControllerDeployment.Spec.Template.Spec.Containers[0].Image = i.ControllerImage + workflowControllerDeployment.Spec.Template.Spec.Containers[0].ImagePullPolicy = apiv1.PullPolicy(i.ImagePullPolicy) + if i.ServiceAccount == "" && i.IsRBACSupported() { + i.MustInstallResource(kube.MustToUnstructured(&workflowControllerServiceAccount)) + 
i.MustInstallResource(kube.MustToUnstructured(&workflowControllerClusterRole)) + i.MustInstallResource(kube.MustToUnstructured(&workflowControllerClusterRoleBinding)) + } + if i.ServiceAccount != "" { + workflowControllerDeployment.Spec.Template.Spec.ServiceAccountName = i.ServiceAccount + } + //i.MustInstallResource(kube.MustToUnstructured(&workflowControllerConfigMap)) + i.installConfigMap(i.clientset) + i.MustInstallResource(kube.MustToUnstructured(&workflowControllerDeployment)) +} + +func (i *Installer) InstallArgoUI() { + var argoUIServiceAccount apiv1.ServiceAccount + var argoUIClusterRole rbacv1.ClusterRole + var argoUIClusterRoleBinding rbacv1.ClusterRoleBinding + var argoUIDeployment appsv1beta2.Deployment + var argoUIService apiv1.Service + i.unmarshalManifest("03a_argo-ui-sa.yaml", &argoUIServiceAccount) + i.unmarshalManifest("03b_argo-ui-cluster-role.yaml", &argoUIClusterRole) + i.unmarshalManifest("03c_argo-ui-cluster-rolebinding.yaml", &argoUIClusterRoleBinding) + i.unmarshalManifest("03d_argo-ui-deployment.yaml", &argoUIDeployment) + i.unmarshalManifest("03e_argo-ui-service.yaml", &argoUIService) + argoUIDeployment.Spec.Template.Spec.Containers[0].Image = i.UIImage + argoUIDeployment.Spec.Template.Spec.Containers[0].ImagePullPolicy = apiv1.PullPolicy(i.ImagePullPolicy) + setEnv(&argoUIDeployment, "ENABLE_WEB_CONSOLE", strconv.FormatBool(i.EnableWebConsole)) + setEnv(&argoUIDeployment, "BASE_HREF", i.UIBaseHref) + if i.UIServiceAccount == "" && i.IsRBACSupported() { + i.MustInstallResource(kube.MustToUnstructured(&argoUIServiceAccount)) + i.MustInstallResource(kube.MustToUnstructured(&argoUIClusterRole)) + i.MustInstallResource(kube.MustToUnstructured(&argoUIClusterRoleBinding)) + } + if i.UIServiceAccount != "" { + argoUIDeployment.Spec.Template.Spec.ServiceAccountName = i.UIServiceAccount + } + i.MustInstallResource(kube.MustToUnstructured(&argoUIDeployment)) + i.MustInstallResource(kube.MustToUnstructured(&argoUIService)) +} + +func setEnv(dep *appsv1beta2.Deployment, key, val string) { + ctr := &dep.Spec.Template.Spec.Containers[0] + for i, env := range ctr.Env { + if env.Name == key { + env.Value = val + ctr.Env[i] = env + return + } + } + ctr.Env = append(ctr.Env, apiv1.EnvVar{Name: key, Value: val}) +} + +func (i *Installer) unmarshalManifest(fileName string, obj interface{}) { + yamlBytes, err := i.box.MustBytes(fileName) + checkError(err) + err = yaml.Unmarshal(yamlBytes, obj) + checkError(err) +} + +func (i *Installer) MustInstallResource(obj *unstructured.Unstructured) *unstructured.Unstructured { + obj, err := i.InstallResource(obj) + checkError(err) + return obj +} + +func isNamespaced(obj *unstructured.Unstructured) bool { + switch obj.GetKind() { + case "Namespace", "ClusterRole", "ClusterRoleBinding", "CustomResourceDefinition": + return false + } + return true +} + +// InstallResource creates or updates a resource.
If the installed resource is already up-to-date, it does nothing +func (i *Installer) InstallResource(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) { + if isNamespaced(obj) { + obj.SetNamespace(i.Namespace) + } + // remove 'creationTimestamp' and 'status' fields from the object so they do not cause a spurious diff against the live object + obj.SetCreationTimestamp(metav1.Time{}) + delete(obj.Object, "status") + if i.DryRun { + printYAML(obj) + return nil, nil + } + gvk := obj.GroupVersionKind() + dclient, err := i.dynClientPool.ClientForGroupVersionKind(gvk) + if err != nil { + return nil, err + } + apiResource, err := kube.ServerResourceForGroupVersionKind(i.disco, gvk) + if err != nil { + return nil, err + } + reIf := dclient.Resource(apiResource, i.Namespace) + liveObj, err := reIf.Create(obj) + if err == nil { + fmt.Printf("%s '%s' created\n", liveObj.GetKind(), liveObj.GetName()) + return liveObj, nil + } + if !apierr.IsAlreadyExists(err) { + return nil, err + } + liveObj, err = reIf.Get(obj.GetName(), metav1.GetOptions{}) + if err != nil { + return nil, err + } + diffRes := diff.Diff(obj, liveObj) + if !diffRes.Modified { + fmt.Printf("%s '%s' up-to-date\n", liveObj.GetKind(), liveObj.GetName()) + return liveObj, nil + } + if !i.Upgrade { + log.Println(diffRes.ASCIIFormat(obj, formatter.AsciiFormatterConfig{})) + return nil, fmt.Errorf("%s '%s' already exists. Rerun with --upgrade to update", obj.GetKind(), obj.GetName()) + } + liveObj, err = reIf.Update(obj) + if err != nil { + return nil, err + } + fmt.Printf("%s '%s' updated\n", liveObj.GetKind(), liveObj.GetName()) + return liveObj, nil +} + +func printYAML(obj interface{}) { + objBytes, err := yaml.Marshal(obj) + if err != nil { + log.Fatalf("Failed to marshal %v", obj) + } + fmt.Printf("---\n%s\n", string(objBytes)) +} + +// checkError is a convenience function to fatally exit if an error is non-nil +func checkError(err error) { + if err != nil { + log.Fatal(err) + } +} + +func (i *Installer) installConfigMap(clientset *kubernetes.Clientset) { + cmClient := clientset.CoreV1().ConfigMaps(i.Namespace) + wfConfig := controller.WorkflowControllerConfig{ + ExecutorImage: i.ExecutorImage, + InstanceID: i.InstanceID, + } + configBytes, err := yaml.Marshal(wfConfig) + if err != nil { + log.Fatalf("%+v", errors.InternalWrapError(err)) + } + wfConfigMap := apiv1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: i.ConfigMap, + Namespace: i.Namespace, + }, + Data: map[string]string{ + common.WorkflowControllerConfigMapKey: string(configBytes), + }, + } + if i.DryRun { + printYAML(wfConfigMap) + return + } + _, err = cmClient.Create(&wfConfigMap) + if err != nil { + if !apierr.IsAlreadyExists(err) { + log.Fatalf("Failed to create ConfigMap '%s' in namespace '%s': %v", i.ConfigMap, i.Namespace, err) + } + // Configmap already exists.
Check if existing configmap needs an update to a new executor image + existingCM, err := cmClient.Get(i.ConfigMap, metav1.GetOptions{}) + if err != nil { + log.Fatalf("Failed to retrieve ConfigMap '%s' in namespace '%s': %v", i.ConfigMap, i.Namespace, err) + } + configStr, ok := existingCM.Data[common.WorkflowControllerConfigMapKey] + if !ok { + log.Fatalf("ConfigMap '%s' missing key '%s'", i.ConfigMap, common.WorkflowControllerConfigMapKey) + } + var existingConfig controller.WorkflowControllerConfig + err = yaml.Unmarshal([]byte(configStr), &existingConfig) + if err != nil { + log.Fatalf("Failed to load controller configuration: %v", err) + } + if existingConfig.ExecutorImage == wfConfig.ExecutorImage { + fmt.Printf("Existing ConfigMap '%s' up-to-date\n", i.ConfigMap) + return + } + if !i.Upgrade { + log.Fatalf("ConfigMap '%s' requires upgrade. Rerun with --upgrade to update the configuration", i.ConfigMap) + } + existingConfig.ExecutorImage = i.ExecutorImage + configBytes, err := yaml.Marshal(existingConfig) + if err != nil { + log.Fatalf("%+v", errors.InternalWrapError(err)) + } + existingCM.Data = map[string]string{ + common.WorkflowControllerConfigMapKey: string(configBytes), + } + _, err = cmClient.Update(existingCM) + if err != nil { + log.Fatalf("Failed to update ConfigMap '%s' in namespace '%s': %v", i.ConfigMap, i.Namespace, err) + } + fmt.Printf("ConfigMap '%s' updated\n", i.ConfigMap) + } +} diff --git a/install/manifests/01_workflow-crd.yaml b/install/manifests/01_workflow-crd.yaml new file mode 100644 index 000000000000..b875a3df783d --- /dev/null +++ b/install/manifests/01_workflow-crd.yaml @@ -0,0 +1,13 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: workflows.argoproj.io +spec: + group: argoproj.io + version: v1alpha1 + scope: Namespaced + names: + kind: Workflow + plural: workflows + shortNames: + - wf diff --git a/install/manifests/02a_workflow-controller-sa.yaml b/install/manifests/02a_workflow-controller-sa.yaml new file mode 100644 index 000000000000..cf37c0d4818c --- /dev/null +++ b/install/manifests/02a_workflow-controller-sa.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: argo + namespace: kube-system diff --git a/install/manifests/02b_workflow-controller-cluster-role.yaml b/install/manifests/02b_workflow-controller-cluster-role.yaml new file mode 100644 index 000000000000..6c8f464049ca --- /dev/null +++ b/install/manifests/02b_workflow-controller-cluster-role.yaml @@ -0,0 +1,42 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: argo-cluster-role +rules: +- apiGroups: + - "" + resources: + - pods + - pods/exec + verbs: + - create + - get + - list + - watch + - update + - patch +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - watch + - list +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - create + - delete +- apiGroups: + - argoproj.io + resources: + - workflows + verbs: + - get + - list + - watch + - update + - patch diff --git a/install/manifests/02c_workflow-controller-cluster-rolebinding.yaml b/install/manifests/02c_workflow-controller-cluster-rolebinding.yaml new file mode 100644 index 000000000000..231472a307c1 --- /dev/null +++ b/install/manifests/02c_workflow-controller-cluster-rolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: argo-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-cluster-role 
+subjects: +- kind: ServiceAccount + name: argo + namespace: kube-system diff --git a/install/manifests/02d_workflow-controller-configmap.yaml b/install/manifests/02d_workflow-controller-configmap.yaml new file mode 100644 index 000000000000..a7de28efec26 --- /dev/null +++ b/install/manifests/02d_workflow-controller-configmap.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: workflow-controller-configmap + namespace: kube-system +data: + config: | + artifactRepository: {} + executorImage: argoproj/argoexec:latest diff --git a/install/manifests/02e_workflow-controller-deployment.yaml b/install/manifests/02e_workflow-controller-deployment.yaml new file mode 100644 index 000000000000..15eb44708b26 --- /dev/null +++ b/install/manifests/02e_workflow-controller-deployment.yaml @@ -0,0 +1,29 @@ +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + name: workflow-controller + namespace: kube-system +spec: + selector: + matchLabels: + app: workflow-controller + template: + metadata: + labels: + app: workflow-controller + spec: + serviceAccountName: argo + containers: + - name: workflow-controller + image: argoproj/workflow-controller:latest + command: + - workflow-controller + args: + - --configmap + - workflow-controller-configmap + env: + - name: ARGO_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace diff --git a/install/manifests/03a_argo-ui-sa.yaml b/install/manifests/03a_argo-ui-sa.yaml new file mode 100644 index 000000000000..a9f4707d7443 --- /dev/null +++ b/install/manifests/03a_argo-ui-sa.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: argo-ui + namespace: kube-system diff --git a/install/manifests/03b_argo-ui-cluster-role.yaml b/install/manifests/03b_argo-ui-cluster-role.yaml new file mode 100644 index 000000000000..571f3849c6c6 --- /dev/null +++ b/install/manifests/03b_argo-ui-cluster-role.yaml @@ -0,0 +1,29 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: argo-ui-cluster-role +rules: +- apiGroups: + - "" + resources: + - pods + - pods/exec + - pods/log + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - get +- apiGroups: + - argoproj.io + resources: + - workflows + verbs: + - get + - list + - watch diff --git a/install/manifests/03c_argo-ui-cluster-rolebinding.yaml b/install/manifests/03c_argo-ui-cluster-rolebinding.yaml new file mode 100644 index 000000000000..76403aa76407 --- /dev/null +++ b/install/manifests/03c_argo-ui-cluster-rolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: argo-ui-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-ui-cluster-role +subjects: +- kind: ServiceAccount + name: argo-ui + namespace: kube-system diff --git a/install/manifests/03d_argo-ui-deployment.yaml b/install/manifests/03d_argo-ui-deployment.yaml new file mode 100644 index 000000000000..48f430537a41 --- /dev/null +++ b/install/manifests/03d_argo-ui-deployment.yaml @@ -0,0 +1,30 @@ +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + name: argo-ui + namespace: kube-system +spec: + selector: + matchLabels: + app: argo-ui + template: + metadata: + labels: + app: argo-ui + spec: + serviceAccountName: argo-ui + containers: + - name: argo-ui + image: argoproj/argoui:latest + env: + - name: ARGO_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: IN_CLUSTER + value: "true" + - name: 
ENABLE_WEB_CONSOLE + value: "false" + - name: BASE_HREF + value: / diff --git a/install/manifests/03e_argo-ui-service.yaml b/install/manifests/03e_argo-ui-service.yaml new file mode 100644 index 000000000000..2577b0e2dad8 --- /dev/null +++ b/install/manifests/03e_argo-ui-service.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: argo-ui + namespace: kube-system +spec: + ports: + - port: 80 + targetPort: 8001 + selector: + app: argo-ui
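For reference, a minimal sketch (not part of the patch itself) of driving the new install package programmatically instead of through the `argo install` command. The kubeconfig handling and option values below are illustrative assumptions; the field names mirror install.InstallOptions and the entry points (install.NewInstaller, Installer.Install) introduced above, and DryRun=true makes the installer print the packr-embedded manifests rather than apply them.

package main

import (
	"log"
	"os"
	"path/filepath"

	"github.com/argoproj/argo/install"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a *rest.Config the same way the CLI does before calling NewInstaller.
	// The kubeconfig path here is an assumption for the example.
	kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
	restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		log.Fatal(err)
	}

	opts := install.InstallOptions{
		Namespace:       "kube-system", // matches the namespace used in install/manifests
		ConfigMap:       "workflow-controller-configmap",
		ControllerImage: "argoproj/workflow-controller:latest",
		ExecutorImage:   "argoproj/argoexec:latest",
		UIImage:         "argoproj/argoui:latest",
		UIBaseHref:      "/",
		ImagePullPolicy: "IfNotPresent",
		DryRun:          true, // print the rendered manifests instead of applying them
	}

	installer, err := install.NewInstaller(restConfig, opts)
	if err != nil {
		log.Fatal(err)
	}
	// Creates (or, with DryRun, prints) the Workflow CRD, workflow-controller,
	// and argo-ui resources embedded from install/manifests via packr.
	installer.Install()
}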