From b063f938f34f650333df6ec5a2e6a325a5b45299 Mon Sep 17 00:00:00 2001
From: Jesse Suen
Date: Thu, 11 Jan 2018 04:21:22 -0800
Subject: [PATCH] Use minimal ClusterRoles for workflow-controller and argo-ui
 deployments

---
 cmd/argo/commands/const.go        |  61 +++++++++++++-
 cmd/argo/commands/install.go      | 134 +++++++++++++++++++-----------
 cmd/argo/commands/uninstall.go    |  54 +++++++-----
 demo.md                           |  22 ++++-
 test/e2e/e2e_test.go              |   6 +-
 workflow/common/common.go         |   1 -
 workflow/common/util.go           |   2 +-
 workflow/controller/controller.go |   4 +-
 8 files changed, 203 insertions(+), 81 deletions(-)

diff --git a/cmd/argo/commands/const.go b/cmd/argo/commands/const.go
index ef9099aeaf35..0ee615fdea1b 100644
--- a/cmd/argo/commands/const.go
+++ b/cmd/argo/commands/const.go
@@ -1,8 +1,61 @@
 package commands
 
-// Constants used by Argo
+import rbacv1 "k8s.io/api/rbac/v1"
+
 const (
-	ArgoServiceAccount = "argo"
-	ArgoClusterRole    = "argo-cluster-role"
-	ArgoServiceName    = "argo-ui"
+	// Argo controller resource constants
+	ArgoControllerServiceAccount     = "argo"
+	ArgoControllerClusterRole        = "argo-cluster-role"
+	ArgoControllerClusterRoleBinding = "argo-binding"
+
+	// Argo UI resource constants
+	ArgoUIServiceAccount     = "argo-ui"
+	ArgoUIClusterRole        = "argo-ui-cluster-role"
+	ArgoUIClusterRoleBinding = "argo-ui-binding"
+	ArgoUIDeploymentName     = "argo-ui"
+	ArgoUIServiceName        = "argo-ui"
+)
+
+var (
+	ArgoControllerPolicyRules = []rbacv1.PolicyRule{
+		{
+			APIGroups: []string{""},
+			// TODO(jesse): remove exec privileges when issue #499 is resolved
+			Resources: []string{"pods", "pods/exec"},
+			Verbs:     []string{"create", "get", "list", "watch", "update", "patch"},
+		},
+		{
+			APIGroups: []string{""},
+			Resources: []string{"configmaps"},
+			Verbs:     []string{"get", "watch", "list"},
+		},
+		{
+			APIGroups: []string{""},
+			Resources: []string{"persistentvolumeclaims"},
+			Verbs:     []string{"create", "delete"},
+		},
+		{
+			APIGroups: []string{"argoproj.io"},
+			Resources: []string{"workflows"},
+			Verbs:     []string{"get", "list", "watch", "update", "patch"},
+		},
+	}
+
+	ArgoUIPolicyRules = []rbacv1.PolicyRule{
+		{
+			APIGroups: []string{""},
+			Resources: []string{"pods", "pods/exec", "pods/log"},
+			Verbs:     []string{"get", "list", "watch"},
+		},
+		{
+			APIGroups: []string{""},
+			Resources: []string{"secrets"},
+			Verbs:     []string{"get"},
+		},
+		{
+			APIGroups: []string{"argoproj.io"},
+			Resources: []string{"workflows"},
+			Verbs:     []string{"get", "list", "watch"},
+		},
+	}
 )
diff --git a/cmd/argo/commands/install.go b/cmd/argo/commands/install.go
index ef99a54e466f..574a6a40d4cd 100644
--- a/cmd/argo/commands/install.go
+++ b/cmd/argo/commands/install.go
@@ -18,7 +18,7 @@ import (
 	"github.com/spf13/cobra"
 	appsv1beta2 "k8s.io/api/apps/v1beta2"
 	apiv1 "k8s.io/api/core/v1"
-	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
+	rbacv1 "k8s.io/api/rbac/v1"
 	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
 	apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
 	apierr "k8s.io/apimachinery/pkg/api/errors"
@@ -28,8 +28,6 @@ import (
 	"k8s.io/client-go/kubernetes"
 )
 
-const clusterAdmin = "cluster-admin"
-
 var (
 	// These values may be overridden by the link flags during build
 	// (e.g. imageTag will use the official release tag on tagged builds)
@@ -44,34 +42,36 @@ var (
 
 func init() {
 	RootCmd.AddCommand(installCmd)
-	installCmd.Flags().StringVar(&installArgs.ControllerName, "controller-name", common.DefaultControllerDeploymentName, "name of controller deployment")
-	installCmd.Flags().StringVar(&installArgs.InstanceID, "instanceid", "", "optional instance id to use for the controller (for multi-controller environments)")
-	installCmd.Flags().StringVar(&installArgs.UIName, "ui-name", common.DefaultUiDeploymentName, "name of ui deployment")
+	installCmd.Flags().BoolVar(&installArgs.Upgrade, "upgrade", false, "upgrade controller/ui deployments and configmap if already installed")
+	installCmd.Flags().BoolVar(&installArgs.DryRun, "dry-run", false, "print the kubernetes manifests to stdout instead of installing")
 	installCmd.Flags().StringVar(&installArgs.Namespace, "install-namespace", common.DefaultControllerNamespace, "install into a specific Namespace")
+	installCmd.Flags().StringVar(&installArgs.InstanceID, "instanceid", "", "optional instance id to use for the controller (for multi-controller environments)")
 	installCmd.Flags().StringVar(&installArgs.ConfigMap, "configmap", common.DefaultConfigMapName(common.DefaultControllerDeploymentName), "install controller using preconfigured configmap")
+	installCmd.Flags().StringVar(&installArgs.ControllerName, "controller-name", common.DefaultControllerDeploymentName, "name of controller deployment")
 	installCmd.Flags().StringVar(&installArgs.ControllerImage, "controller-image", DefaultControllerImage, "use a specified controller image")
-	installCmd.Flags().StringVar(&installArgs.UIImage, "ui-image", DefaultUiImage, "use a specified ui image")
-	installCmd.Flags().StringVar(&installArgs.ExecutorImage, "executor-image", DefaultExecutorImage, "use a specified executor image")
 	installCmd.Flags().StringVar(&installArgs.ServiceAccount, "service-account", "", "use a specified service account for the workflow-controller deployment")
-	installCmd.Flags().BoolVar(&installArgs.Upgrade, "upgrade", false, "upgrade controller/ui deployments and configmap if already installed")
-	installCmd.Flags().BoolVar(&installArgs.EnableWebConsole, "enable-web-console", false, "allows to ssh into running step container using Argo UI")
-	installCmd.Flags().BoolVar(&installArgs.DryRun, "dry-run", false, "print the kubernetes manifests to stdout instead of installing")
+	installCmd.Flags().StringVar(&installArgs.ExecutorImage, "executor-image", DefaultExecutorImage, "use a specified executor image")
+	installCmd.Flags().StringVar(&installArgs.UIName, "ui-name", ArgoUIDeploymentName, "name of ui deployment")
+	installCmd.Flags().StringVar(&installArgs.UIImage, "ui-image", DefaultUiImage, "use a specified ui image")
+	installCmd.Flags().StringVar(&installArgs.UIServiceAccount, "ui-service-account", "", "use a specified service account for the argo-ui deployment")
+	installCmd.Flags().BoolVar(&installArgs.EnableWebConsole, "enable-web-console", false, "allows exec access into a running step container using the Argo UI")
 }
 
 // InstallFlags has all the required parameters for installing Argo.
 type InstallFlags struct {
-	ControllerName   string // --controller-name
-	InstanceID       string // --instanceid
-	UIName           string // --ui-name
+	Upgrade          bool   // --upgrade
+	DryRun           bool   // --dry-run
 	Namespace        string // --install-namespace
+	InstanceID       string // --instanceid
 	ConfigMap        string // --configmap
+	ControllerName   string // --controller-name
 	ControllerImage  string // --controller-image
-	UIImage          string // --ui-image
-	ExecutorImage    string // --executor-image
 	ServiceAccount   string // --service-account
-	Upgrade          bool   // --upgrade
+	ExecutorImage    string // --executor-image
+	UIName           string // --ui-name
+	UIImage          string // --ui-image
+	UIServiceAccount string // --ui-service-account
 	EnableWebConsole bool   // --enable-web-console
-	DryRun           bool   // --dry-run
 }
 
 var installArgs InstallFlags
@@ -98,22 +98,19 @@ func Install(cmd *cobra.Command, args InstallFlags) {
 		kubernetesVersionCheck(clientset)
 	}
 	installCRD(clientset, args)
-	if args.ServiceAccount == "" {
-		if clusterAdminExists(clientset) {
-			seviceAccountName := ArgoServiceAccount
-			createServiceAccount(clientset, seviceAccountName, args)
-			createClusterRoleBinding(clientset, seviceAccountName, args)
-			args.ServiceAccount = seviceAccountName
-		}
+	if args.ServiceAccount == "" && clusterAdminExists(clientset) {
+		createServiceAccount(clientset, ArgoControllerServiceAccount, args)
+		createClusterRole(clientset, ArgoControllerClusterRole, ArgoControllerPolicyRules, args)
+		createClusterRoleBinding(clientset, ArgoControllerClusterRoleBinding, ArgoControllerServiceAccount, ArgoControllerClusterRole, args)
+		args.ServiceAccount = ArgoControllerServiceAccount
+	}
+	if args.UIServiceAccount == "" && clusterAdminExists(clientset) {
+		createServiceAccount(clientset, ArgoUIServiceAccount, args)
+		createClusterRole(clientset, ArgoUIClusterRole, ArgoUIPolicyRules, args)
+		createClusterRoleBinding(clientset, ArgoUIClusterRoleBinding, ArgoUIServiceAccount, ArgoUIClusterRole, args)
+		args.UIServiceAccount = ArgoUIServiceAccount
 	}
 	installConfigMap(clientset, args)
-	if !args.DryRun {
-		if args.ServiceAccount == "" {
-			fmt.Printf("Using default service account for deployments\n")
-		} else {
-			fmt.Printf("Using service account '%s' for deployments\n", args.ServiceAccount)
-		}
-	}
 	installController(clientset, args)
 	installUI(clientset, args)
 	installUIService(clientset, args)
@@ -124,8 +121,9 @@ func install(cmd *cobra.Command, args []string) {
 }
 
 func clusterAdminExists(clientset *kubernetes.Clientset) bool {
-	clusterRoles := clientset.RbacV1beta1().ClusterRoles()
-	_, err := clusterRoles.Get(clusterAdmin, metav1.GetOptions{})
+	// TODO: change this method to check if RBAC is enabled
+	clusterRoles := clientset.RbacV1().ClusterRoles()
+	_, err := clusterRoles.Get("cluster-admin", metav1.GetOptions{})
 	if err != nil {
 		if apierr.IsNotFound(err) {
 			return false
@@ -161,23 +159,65 @@ func createServiceAccount(clientset *kubernetes.Clientset, serviceAccountName st
 	fmt.Printf("ServiceAccount '%s' created\n", serviceAccountName)
 }
 
-func createClusterRoleBinding(clientset *kubernetes.Clientset, serviceAccountName string, args InstallFlags) {
-	roleBinding := rbacv1beta1.ClusterRoleBinding{
+func createClusterRole(clientset *kubernetes.Clientset, clusterRoleName string, rules []rbacv1.PolicyRule, args InstallFlags) {
+	clusterRole := rbacv1.ClusterRole{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "rbac.authorization.k8s.io/v1",
+			Kind:       "ClusterRole",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: clusterRoleName,
+		},
+		Rules: rules,
+	}
+	if args.DryRun {
+		printYAML(clusterRole)
+		return
+	}
+	crclient := clientset.RbacV1().ClusterRoles()
+	_, err := crclient.Create(&clusterRole)
+	if err != nil {
+		if !apierr.IsAlreadyExists(err) {
+			log.Fatalf("Failed to create ClusterRole '%s': %v\n", clusterRoleName, err)
+		}
+		existingClusterRole, err := crclient.Get(clusterRoleName, metav1.GetOptions{})
+		if err != nil {
+			log.Fatalf("Failed to get ClusterRole '%s': %v\n", clusterRoleName, err)
+		}
+		if !reflect.DeepEqual(existingClusterRole.Rules, clusterRole.Rules) {
+			if !args.Upgrade {
+				log.Fatalf("ClusterRole '%s' requires upgrade. Rerun with --upgrade to update the configuration", clusterRoleName)
+			}
+			_, err = crclient.Update(&clusterRole)
+			if err != nil {
+				log.Fatalf("Failed to update ClusterRole '%s': %v\n", clusterRoleName, err)
+			}
+			fmt.Printf("ClusterRole '%s' updated\n", clusterRoleName)
+		} else {
+			fmt.Printf("Existing ClusterRole '%s' up-to-date\n", clusterRoleName)
+		}
+	} else {
+		fmt.Printf("ClusterRole '%s' created\n", clusterRoleName)
+	}
+}
+
+func createClusterRoleBinding(clientset *kubernetes.Clientset, clusterBindingRoleName, serviceAccountName, clusterRoleName string, args InstallFlags) {
+	roleBinding := rbacv1.ClusterRoleBinding{
 		TypeMeta: metav1.TypeMeta{
 			APIVersion: "rbac.authorization.k8s.io/v1beta1",
 			Kind:       "ClusterRoleBinding",
 		},
 		ObjectMeta: metav1.ObjectMeta{
-			Name: ArgoClusterRole,
+			Name: clusterBindingRoleName,
 		},
-		RoleRef: rbacv1beta1.RoleRef{
+		RoleRef: rbacv1.RoleRef{
 			APIGroup: "rbac.authorization.k8s.io",
 			Kind:     "ClusterRole",
-			Name:     clusterAdmin,
+			Name:     clusterRoleName,
 		},
-		Subjects: []rbacv1beta1.Subject{
+		Subjects: []rbacv1.Subject{
 			{
-				Kind:      rbacv1beta1.ServiceAccountKind,
+				Kind:      rbacv1.ServiceAccountKind,
 				Name:      serviceAccountName,
 				Namespace: args.Namespace,
 			},
@@ -187,15 +227,15 @@ func createClusterRoleBinding(clientset *kubernetes.Clientset, serviceAccountNam
 		printYAML(roleBinding)
 		return
 	}
-	_, err := clientset.RbacV1beta1().ClusterRoleBindings().Create(&roleBinding)
+	_, err := clientset.RbacV1().ClusterRoleBindings().Create(&roleBinding)
 	if err != nil {
 		if !apierr.IsAlreadyExists(err) {
-			log.Fatalf("Failed to create ClusterRoleBinding %s: %v\n", ArgoClusterRole, err)
+			log.Fatalf("Failed to create ClusterRoleBinding %s: %v\n", clusterBindingRoleName, err)
 		}
-		fmt.Printf("ClusterRoleBinding '%s' already exists\n", ArgoClusterRole)
+		fmt.Printf("ClusterRoleBinding '%s' already exists\n", clusterBindingRoleName)
 		return
 	}
-	fmt.Printf("ClusterRoleBinding '%s' created, bound '%s' to '%s'\n", ArgoClusterRole, serviceAccountName, clusterAdmin)
+	fmt.Printf("ClusterRoleBinding '%s' created, bound '%s' to '%s'\n", clusterBindingRoleName, serviceAccountName, clusterRoleName)
 }
 
 func kubernetesVersionCheck(clientset *kubernetes.Clientset) {
@@ -364,7 +404,7 @@ func installUI(clientset *kubernetes.Clientset, args InstallFlags) {
 				},
 			},
 			Spec: apiv1.PodSpec{
-				ServiceAccountName: args.ServiceAccount,
+				ServiceAccountName: args.UIServiceAccount,
 				Containers: []apiv1.Container{
 					{
 						Name:  args.UIName,
@@ -464,7 +504,7 @@ func upgradeNeeded(dep1, dep2 *appsv1beta2.Deployment) bool {
 }
 
 func installUIService(clientset *kubernetes.Clientset, args InstallFlags) {
-	svcName := ArgoServiceName
+	svcName := ArgoUIServiceName
 	svcClient := clientset.CoreV1().Services(args.Namespace)
 	uiSvc := apiv1.Service{
 		TypeMeta: metav1.TypeMeta{
diff --git a/cmd/argo/commands/uninstall.go b/cmd/argo/commands/uninstall.go
index 5a3b219006ae..3f952d9a1ab2 100644
--- a/cmd/argo/commands/uninstall.go
+++ b/cmd/argo/commands/uninstall.go
@@ -15,7 +15,7 @@ import (
 func init() {
 	RootCmd.AddCommand(uninstallCmd)
 	uninstallCmd.Flags().StringVar(&uninstallArgs.controllerName, "controller-name", common.DefaultControllerDeploymentName, "name of controller deployment")
-	uninstallCmd.Flags().StringVar(&uninstallArgs.uiName, "ui-name", common.DefaultUiDeploymentName, "name of ui deployment")
+	uninstallCmd.Flags().StringVar(&uninstallArgs.uiName, "ui-name", ArgoUIDeploymentName, "name of ui deployment")
 	uninstallCmd.Flags().StringVar(&uninstallArgs.configMap, "configmap", common.DefaultConfigMapName(common.DefaultControllerDeploymentName), "name of configmap to uninstall")
 	uninstallCmd.Flags().StringVar(&uninstallArgs.namespace, "install-namespace", common.DefaultControllerNamespace, "uninstall from a specific namespace")
 }
@@ -40,14 +40,14 @@ func uninstall(cmd *cobra.Command, args []string) {
 	fmt.Printf("Uninstalling from namespace '%s'\n", uninstallArgs.namespace)
 	// Delete the UI service
 	svcClient := clientset.CoreV1().Services(uninstallArgs.namespace)
-	err := svcClient.Delete(ArgoServiceName, &metav1.DeleteOptions{})
+	err := svcClient.Delete(ArgoUIServiceName, &metav1.DeleteOptions{})
 	if err != nil {
 		if !apierr.IsNotFound(err) {
-			log.Fatalf("Failed to delete service '%s': %v", ArgoServiceName, err)
+			log.Fatalf("Failed to delete service '%s': %v", ArgoUIServiceName, err)
 		}
-		fmt.Printf("Service '%s' in namespace '%s' not found\n", ArgoServiceName, uninstallArgs.namespace)
+		fmt.Printf("Service '%s' in namespace '%s' not found\n", ArgoUIServiceName, uninstallArgs.namespace)
 	} else {
-		fmt.Printf("Service '%s' deleted\n", ArgoServiceName)
+		fmt.Printf("Service '%s' deleted\n", ArgoUIServiceName)
 	}
 
 	// Delete the UI and workflow-controller deployment
@@ -77,24 +77,40 @@ func uninstall(cmd *cobra.Command, args []string) {
 		fmt.Printf("ConfigMap '%s' deleted\n", uninstallArgs.configMap)
 	}
 
-	// Delete role binding
-	if err := clientset.RbacV1beta1().ClusterRoleBindings().Delete(ArgoClusterRole, &metav1.DeleteOptions{}); err != nil {
-		if !apierr.IsNotFound(err) {
-			log.Fatalf("Failed to check clusterRoleBinding: %v\n", err)
+	// Delete the controller and UI cluster role bindings
+	for _, bindingName := range []string{ArgoControllerClusterRoleBinding, ArgoUIClusterRoleBinding} {
+		if err := clientset.RbacV1().ClusterRoleBindings().Delete(bindingName, &metav1.DeleteOptions{}); err != nil {
+			if !apierr.IsNotFound(err) {
+				log.Fatalf("Failed to delete ClusterRoleBinding: %v\n", err)
+			}
+			fmt.Printf("ClusterRoleBinding '%s' not found\n", bindingName)
+		} else {
+			fmt.Printf("ClusterRoleBinding '%s' deleted\n", bindingName)
 		}
-		fmt.Printf("ClusterRoleBinding '%s' not found\n", ArgoClusterRole)
-	} else {
-		fmt.Printf("ClusterRoleBinding '%s' deleted\n", ArgoClusterRole)
 	}
 
-	// Delete service account
-	if err := clientset.CoreV1().ServiceAccounts(uninstallArgs.namespace).Delete(ArgoServiceAccount, &metav1.DeleteOptions{}); err != nil {
-		if !apierr.IsNotFound(err) {
-			log.Fatalf("Failed to get service accounts: %v\n", err)
+	// Delete the controller and UI cluster roles
+	for _, roleName := range []string{ArgoControllerClusterRole, ArgoUIClusterRole} {
+		if err := clientset.RbacV1().ClusterRoles().Delete(roleName, &metav1.DeleteOptions{}); err != nil {
+			if !apierr.IsNotFound(err) {
+				log.Fatalf("Failed to delete ClusterRole: %v\n", err)
+			}
+			fmt.Printf("ClusterRole '%s' not found\n", roleName)
+		} else {
+			fmt.Printf("ClusterRole '%s' deleted\n", roleName)
+		}
+	}
+
+	// Delete the controller and UI service accounts
+	for _, serviceAccount := range []string{ArgoControllerServiceAccount, ArgoUIServiceAccount} {
+		if err := clientset.CoreV1().ServiceAccounts(uninstallArgs.namespace).Delete(serviceAccount, &metav1.DeleteOptions{}); err != nil {
+			if !apierr.IsNotFound(err) {
+				log.Fatalf("Failed to delete ServiceAccount: %v\n", err)
+			}
+			fmt.Printf("ServiceAccount '%s' in namespace '%s' not found\n", serviceAccount, uninstallArgs.namespace)
+		} else {
+			fmt.Printf("ServiceAccount '%s' deleted\n", serviceAccount)
 		}
-		fmt.Printf("ServiceAccount '%s' in namespace '%s' not found\n", ArgoServiceAccount, uninstallArgs.namespace)
-	} else {
-		fmt.Printf("ServiceAccount '%s' deleted\n", ArgoServiceAccount)
 	}
 
 	// Delete the workflow CRD
diff --git a/demo.md b/demo.md
index b9ebc46a7093..f5c2c1e651c2 100644
--- a/demo.md
+++ b/demo.md
@@ -23,7 +23,12 @@ $ chmod +x /usr/local/bin/argo
 ```
 $ argo install
 ```
-NOTE: the instructions below assume the installation of argo into the `kube-system` namespace (the default behavior). A different namespace can be chosen using the `argo install --install-namespace ` flag, in which case you should substitute `kube-system` with your chosen namespace in the examples below.
+NOTE:
+* On GKE with RBAC enabled, you may need to grant your account the ability to create new cluster roles:
+```
+$ kubectl create clusterrolebinding YOURNAME-cluster-admin-binding --clusterrole=cluster-admin --user=YOUREMAIL@gmail.com
+```
+* The instructions below assume the installation of argo into the `kube-system` namespace (the default behavior). A different namespace can be chosen using the `argo install --install-namespace ` flag, in which case you should substitute `kube-system` with your chosen namespace in the examples below.
 
 ## 3. Configure the service account to run workflows (required for RBAC clusters)
 For clusters with RBAC enabled, the 'default' service account is too limited to do any kind of meaningful work. Run the following command to grant admin privileges to the 'default' service account in the namespace 'default':
@@ -114,14 +119,23 @@ $ argo submit https://raw.githubusercontent.com/argoproj/argo/master/examples/ar
 
 By default, the Argo UI service is not exposed with an external IP. To access the UI, use one of the following methods:
 
-#### Method 1: kubectl proxy
+#### Method 1: kubectl port-forward
+Run:
+```
+$ kubectl port-forward $(kubectl get pods -n kube-system -l app=argo-ui -o jsonpath='{.items[0].metadata.name}') -n kube-system 8001:8001
+```
+Then visit: http://127.0.0.1:8001/
+
+#### Method 2: kubectl proxy
 Run:
 ```
 $ kubectl proxy
 ```
-Then visit the following URL in your browser: http://127.0.0.1:8001/api/v1/proxy/namespaces/kube-system/services/argo-ui/
+Then visit: http://127.0.0.1:8001/api/v1/proxy/namespaces/kube-system/services/argo-ui/
+
+NOTE: artifact download and the web console are not supported using this method
 
-#### Method 2: Use a LoadBalancer
+#### Method 3: Use a LoadBalancer
 
 Update the argo-ui service to be of type `LoadBalancer`.
 ```
diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index 546fc03ab1cd..baca1a870bab 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -38,7 +38,7 @@ func getKubernetesClient() *kubernetes.Clientset {
 func newInstallArgs(namespace string) commands.InstallFlags {
 	return commands.InstallFlags{
 		ControllerName:  common.DefaultControllerDeploymentName,
-		UIName:          common.DefaultUiDeploymentName,
+		UIName:          commands.ArgoUIDeploymentName,
 		Namespace:       namespace,
 		ConfigMap:       common.DefaultConfigMapName(common.DefaultControllerDeploymentName),
 		ControllerImage: "argoproj/workflow-controller:latest",
@@ -55,7 +55,7 @@ func createNamespaceForTest() string {
 			GenerateName: "argo-e2e-test-",
 		},
 	}
-	cns, err := clientset.Core().Namespaces().Create(ns)
+	cns, err := clientset.CoreV1().Namespaces().Create(ns)
 	if err != nil {
 		panic(err)
 	}
@@ -66,7 +66,7 @@ func createNamespaceForTest() string {
 func deleteTestNamespace(namespace string) error {
 	clientset := getKubernetesClient()
 	deleteOptions := metav1.DeleteOptions{}
-	return clientset.Core().Namespaces().Delete(namespace, &deleteOptions)
+	return clientset.CoreV1().Namespaces().Delete(namespace, &deleteOptions)
 }
 
 func installArgoInNamespace(namespace string) {
diff --git a/workflow/common/common.go b/workflow/common/common.go
index e12175e0c57a..9cc64878dddb 100644
--- a/workflow/common/common.go
+++ b/workflow/common/common.go
@@ -7,7 +7,6 @@ import (
 
 const (
 	// DefaultControllerDeploymentName is the default deployment name of the workflow controller
 	DefaultControllerDeploymentName = "workflow-controller"
-	DefaultUiDeploymentName         = "argo-ui"
 	// DefaultControllerNamespace is the default namespace where the workflow controller is installed
 	DefaultControllerNamespace = "kube-system"
diff --git a/workflow/common/util.go b/workflow/common/util.go
index 498085842641..3609013fb666 100644
--- a/workflow/common/util.go
+++ b/workflow/common/util.go
@@ -281,7 +281,7 @@ func addPodMetadata(c kubernetes.Interface, field, podName, namespace, key, valu
 		return errors.InternalWrapError(err)
 	}
 	for attempt := 0; attempt < patchRetries; attempt++ {
-		_, err = c.Core().Pods(namespace).Patch(podName, types.MergePatchType, patch)
+		_, err = c.CoreV1().Pods(namespace).Patch(podName, types.MergePatchType, patch)
 		if err != nil {
 			if !apierr.IsConflict(err) {
 				return err
diff --git a/workflow/controller/controller.go b/workflow/controller/controller.go
index f144b69f1cc8..b497d0d65c23 100644
--- a/workflow/controller/controller.go
+++ b/workflow/controller/controller.go
@@ -359,7 +359,7 @@ func (wfc *WorkflowController) watchControllerConfigMap(ctx context.Context) (ca
 }
 
 func (wfc *WorkflowController) newControllerConfigMapWatch() *cache.ListWatch {
-	c := wfc.kubeclientset.Core().RESTClient()
+	c := wfc.kubeclientset.CoreV1().RESTClient()
 	resource := "configmaps"
 	name := wfc.ConfigMap
 	namespace := wfc.ConfigMapNS
@@ -386,7 +386,7 @@ func (wfc *WorkflowController) newControllerConfigMapWatch() *cache.ListWatch {
 }
 
 func (wfc *WorkflowController) newWorkflowPodWatch() *cache.ListWatch {
-	c := wfc.kubeclientset.Core().RESTClient()
+	c := wfc.kubeclientset.CoreV1().RESTClient()
 	resource := "pods"
 	namespace := wfc.Config.Namespace
 	fieldSelector := fields.ParseSelectorOrDie("status.phase!=Pending")