diff --git a/Makefile b/Makefile
index 0b1a6a0aee0f..d532b826d128 100644
--- a/Makefile
+++ b/Makefile
@@ -97,6 +97,7 @@ SWAGGER_FILES := pkg/apiclient/_.primary.swagger.json \
 	pkg/apiclient/event/event.swagger.json \
 	pkg/apiclient/eventsource/eventsource.swagger.json \
 	pkg/apiclient/info/info.swagger.json \
+	pkg/apiclient/pipeline/pipeline.swagger.json \
 	pkg/apiclient/sensor/sensor.swagger.json \
 	pkg/apiclient/workflow/workflow.swagger.json \
 	pkg/apiclient/workflowarchive/workflow-archive.swagger.json \
@@ -249,6 +250,7 @@ swagger: \
 	pkg/apiclient/eventsource/eventsource.swagger.json \
 	pkg/apiclient/info/info.swagger.json \
 	pkg/apiclient/sensor/sensor.swagger.json \
+	pkg/apiclient/pipeline/pipeline.swagger.json \
 	pkg/apiclient/workflow/workflow.swagger.json \
 	pkg/apiclient/workflowarchive/workflow-archive.swagger.json \
 	pkg/apiclient/workflowtemplate/workflow-template.swagger.json \
@@ -334,6 +336,9 @@ pkg/apiclient/info/info.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/i
 pkg/apiclient/sensor/sensor.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/sensor/sensor.proto
 	$(call protoc,pkg/apiclient/sensor/sensor.proto)
 
+pkg/apiclient/pipeline/pipeline.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/pipeline/pipeline.proto
+	$(call protoc,pkg/apiclient/pipeline/pipeline.proto)
+
 pkg/apiclient/workflow/workflow.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/workflow/workflow.proto
 	$(call protoc,pkg/apiclient/workflow/workflow.proto)
 
diff --git a/api/jsonschema/schema.json b/api/jsonschema/schema.json
index 117fb32d5ed9..c1b7805ba304 100644
--- a/api/jsonschema/schema.json
+++ b/api/jsonschema/schema.json
@@ -70,6 +70,905 @@
       },
       "type": "object"
     },
+    "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AWSCredentials": {
+      "properties": {
+        "accessKeyId": {
+          "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
+        },
+        "secretAccessKey": {
+          "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
+        }
+      },
+      "type": "object"
+    },
+    "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AWSEndpoint": {
+      "properties": {
+        "url": {
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Backoff": {
+      "properties": {
+        "FactorPercentage": {
+          "title": "+kubebuilder:default=200",
+          "type": "integer"
+        },
+        "cap": {
+          "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration",
+          "title": "+kubebuilder:default=\"0ms\""
+        },
+        "duration": {
+          "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration",
+          "title": "+kubebuilder:default=\"100ms\""
+        },
+        "jitterPercentage": {
+          "title": "the amount of jitter per step, typically 10-20%, \u003e100% is valid, but strange\n+kubebuilder:default=10",
+          "type": "integer"
+        },
+        "steps": {
+          "format": "uint64",
+          "title": "the number of backoff steps, zero means no retries\n+kubebuilder:default=20",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Cat": {
+      "type": "object"
+    },
+    "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Code": {
+      "properties": {
+        "runtime": {
+          "type": "string"
+        },
+        "source": {
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Container": {
+      "properties": {
+        "args": {
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
+        "command": {
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
+        "env": {
+          "items": {
+            "$ref": "#/definitions/io.k8s.api.core.v1.EnvVar"
+          },
+          "type": "array"
+        },
+ "image": { + "type": "string" + }, + "in": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Interface" + }, + "resources": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" + }, + "volumeMounts": { + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.VolumeMount" + }, + "type": "array" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Cron": { + "properties": { + "layout": { + "title": "+kubebuilder:default=\"2006-01-02T15:04:05Z07:00\"", + "type": "string" + }, + "schedule": { + "type": "string" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBDataSource": { + "properties": { + "value": { + "type": "string" + }, + "valueFrom": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBDataSourceFrom" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBDataSourceFrom": { + "properties": { + "secretKeyRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBSink": { + "properties": { + "actions": { + "items": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLAction" + }, + "type": "array" + }, + "database": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Database" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBSource": { + "properties": { + "commitInterval": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration", + "title": "+kubebuilder:default=\"5s\"" + }, + "database": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Database" + }, + "initSchema": { + "title": "+kubebuilder:default=true", + "type": "boolean" + }, + "offsetColumn": { + "type": "string" + }, + "pollInterval": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration", + "title": "+kubebuilder:default=\"1s\"" + }, + "query": { + "type": "string" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Database": { + "properties": { + "dataSource": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBDataSource" + }, + "driver": { + "title": "+kubebuilder:default=default", + "type": "string" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Dedupe": { + "properties": { + "maxSize": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity", + "title": "MaxSize is the maximum number of entries to keep in the in-memory database used to store recent UIDs.\nLarger number mean bigger windows of time for dedupe, but greater memory usage.\n+kubebuilder:default=\"1M\"" + }, + "uid": { + "title": "+kubebuilder:default=\"sha1(msg)\"", + "type": "string" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Expand": { + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Flatten": { + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Git": { + "properties": { + "branch": { + "title": "+kubebuilder:default=main", + "type": "string" + }, + "command": { + "items": { + "type": "string" + }, + "type": "array" + }, + "env": { + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EnvVar" + }, + "type": "array" + }, + "image": { + "type": "string" + }, + "passwordSecret": { + "$ref": 
"#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "title": "PasswordSecret is the secret selector to the repository password" + }, + "path": { + "description": "+kubebuilder:default=.", + "type": "string" + }, + "sshPrivateKeySecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "title": "SSHPrivateKeySecret is the secret selector to the repository ssh private key" + }, + "url": { + "type": "string" + }, + "usernameSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "title": "UsernameSecret is the secret selector to the repository username" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Group": { + "properties": { + "endOfGroup": { + "type": "string" + }, + "format": { + "type": "string" + }, + "key": { + "type": "string" + }, + "storage": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Storage" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTP": { + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPHeader": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + }, + "valueFrom": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPHeaderSource" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPHeaderSource": { + "properties": { + "secretKeyRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPSink": { + "properties": { + "headers": { + "items": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPHeader" + }, + "type": "array" + }, + "url": { + "type": "string" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPSource": { + "properties": { + "serviceName": { + "type": "string" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Interface": { + "properties": { + "fifo": { + "type": "boolean" + }, + "http": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTP" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Kafka": { + "properties": { + "kafkaConfig": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaConfig" + }, + "name": { + "title": "+kubebuilder:default=default", + "type": "string" + }, + "topic": { + "type": "string" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaConfig": { + "properties": { + "brokers": { + "items": { + "type": "string" + }, + "type": "array" + }, + "net": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaNET" + }, + "version": { + "type": "string" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaNET": { + "properties": { + "sasl": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SASL" + }, + "tls": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.TLS" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaSink": { + "properties": { + "async": { + "type": "boolean" + }, + "kafka": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Kafka" + } + }, + "type": "object" + }, + 
"github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaSource": { + "properties": { + "kafka": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Kafka" + }, + "startOffset": { + "title": "+kubebuilder:default=Last", + "type": "string" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Log": { + "properties": { + "truncate": { + "format": "uint64", + "type": "string" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Metadata": { + "properties": { + "annotations": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Metrics": { + "properties": { + "errors": { + "format": "uint64", + "type": "string" + }, + "rate": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "retries": { + "format": "uint64", + "title": "current rate of messages per second", + "type": "string" + }, + "total": { + "format": "uint64", + "type": "string" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Pipeline": { + "properties": { + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.PipelineSpec" + }, + "status": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.PipelineStatus" + } + }, + "title": "+kubebuilder:object:root=true\n+kubebuilder:resource:shortName=pl\n+kubebuilder:subresource:status\n+kubebuilder:printcolumn:name=\"Phase\",type=string,JSONPath=`.status.phase`\n+kubebuilder:printcolumn:name=\"Message\",type=string,JSONPath=`.status.message`", + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.PipelineList": { + "properties": { + "items": { + "items": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Pipeline" + }, + "type": "array" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.PipelineSpec": { + "properties": { + "steps": { + "items": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.StepSpec" + }, + "title": "+patchStrategy=merge\n+patchMergeKey=name", + "type": "array" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.PipelineStatus": { + "properties": { + "conditions": { + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" + }, + "type": "array" + }, + "lastUpdated": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "type": "string" + }, + "phase": { + "type": "string" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3": { + "properties": { + "bucket": { + "type": "string" + }, + "credentials": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AWSCredentials" + }, + "endpoint": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AWSEndpoint" + }, + "name": { + "title": "+kubebuilder:default=default", + "type": "string" + }, + "region": { + "type": "string" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3Sink": { + 
"properties": { + "s3": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3Source": { + "properties": { + "pollPeriod": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration", + "title": "+kubebuilder:default=\"1m\"" + }, + "s3": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SASL": { + "properties": { + "mechanism": { + "title": "SASLMechanism is the name of the enabled SASL mechanism.\nPossible values: OAUTHBEARER, PLAIN (defaults to PLAIN).\n+optional", + "type": "string" + }, + "password": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "title": "Password for SASL/PLAIN authentication" + }, + "user": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "title": "User is the authentication identity (authcid) to present for\nSASL/PLAIN or SASL/SCRAM authentication" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLAction": { + "properties": { + "onError": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLStatement" + }, + "onRecordNotFound": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLStatement" + }, + "statement": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLStatement" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLStatement": { + "properties": { + "args": { + "items": { + "type": "string" + }, + "type": "array" + }, + "sql": { + "type": "string" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.STAN": { + "properties": { + "auth": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.STANAuth" + }, + "clusterId": { + "type": "string" + }, + "maxInflight": { + "title": "Max inflight messages when subscribing to the stan server, which means how many messages\nbetween commits, therefore potential duplicates during disruption\n+kubebuilder:default=20", + "type": "integer" + }, + "name": { + "title": "+kubebuilder:default=default", + "type": "string" + }, + "natsMonitoringUrl": { + "type": "string" + }, + "natsUrl": { + "type": "string" + }, + "subject": { + "type": "string" + }, + "subjectPrefix": { + "type": "string" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.STANAuth": { + "properties": { + "token": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Scale": { + "properties": { + "maxReplicas": { + "type": "integer" + }, + "minReplicas": { + "type": "integer" + }, + "replicaRatio": { + "title": "takes precedence over min", + "type": "integer" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Sidecar": { + "properties": { + "resources": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements", + "title": "+kubebuilder:default={limits: {\"cpu\": \"500m\", \"memory\": \"256Mi\"}, requests: {\"cpu\": \"100m\", \"memory\": \"64Mi\"}}" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Sink": { + "properties": { + "db": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBSink" + }, + "http": { 
+ "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPSink" + }, + "kafka": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaSink" + }, + "log": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Log" + }, + "name": { + "title": "+kubebuilder:default=default", + "type": "string" + }, + "s3": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3Sink" + }, + "stan": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.STAN" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Source": { + "properties": { + "cron": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Cron" + }, + "db": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBSource" + }, + "http": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPSource" + }, + "kafka": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaSource" + }, + "name": { + "title": "+kubebuilder:default=default", + "type": "string" + }, + "retry": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Backoff", + "title": "+kubebuilder:default={duration: \"100ms\", steps: 20, factorPercentage: 200, jitterPercentage: 10}" + }, + "s3": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3Source" + }, + "stan": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.STAN" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SourceStatus": { + "properties": { + "metrics": { + "additionalProperties": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Metrics" + }, + "type": "object" + }, + "pending": { + "format": "uint64", + "type": "string" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Step": { + "properties": { + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.StepSpec" + }, + "status": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.StepStatus" + } + }, + "title": "+kubebuilder:object:root=true\n+kubebuilder:subresource:status\n+kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector\n+kubebuilder:printcolumn:name=\"Phase\",type=string,JSONPath=`.status.phase`\n+kubebuilder:printcolumn:name=\"Reason\",type=string,JSONPath=`.status.reason`\n+kubebuilder:printcolumn:name=\"Message\",type=string,JSONPath=`.status.message`\n+kubebuilder:printcolumn:name=\"Desired\",type=string,JSONPath=`.spec.replicas`\n+kubebuilder:printcolumn:name=\"Current\",type=string,JSONPath=`.status.replicas`", + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.StepSpec": { + "properties": { + "affinity": { + "$ref": "#/definitions/io.k8s.api.core.v1.Affinity" + }, + "cat": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Cat" + }, + "code": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Code" + }, + "container": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Container" + }, + "dedupe": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Dedupe" + }, + 
"expand": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Expand" + }, + "filter": { + "type": "string" + }, + "flatten": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Flatten" + }, + "git": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Git" + }, + "group": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Group" + }, + "imagePullSecrets": { + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" + }, + "title": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images\nin pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets\ncan be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.\nMore info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod\n+patchStrategy=merge\n+patchMergeKey=name", + "type": "array" + }, + "map": { + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Metadata" + }, + "name": { + "title": "+kubebuilder:default=default", + "type": "string" + }, + "nodeSelector": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "replicas": { + "title": "+kubebuilder:default=1", + "type": "integer" + }, + "restartPolicy": { + "title": "+kubebuilder:default=OnFailure", + "type": "string" + }, + "scale": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Scale" + }, + "serviceAccountName": { + "title": "+kubebuilder:default=pipeline", + "type": "string" + }, + "sidecar": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Sidecar", + "title": "+kubebuilder:default={resources: {limits: {\"cpu\": \"500m\", \"memory\": \"256Mi\"}, requests: {\"cpu\": \"100m\", \"memory\": \"64Mi\"}}}" + }, + "sinks": { + "items": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Sink" + }, + "title": "+patchStrategy=merge\n+patchMergeKey=name", + "type": "array" + }, + "sources": { + "items": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Source" + }, + "title": "+patchStrategy=merge\n+patchMergeKey=name", + "type": "array" + }, + "terminator": { + "type": "boolean" + }, + "tolerations": { + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Toleration" + }, + "type": "array" + }, + "volumes": { + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Volume" + }, + "title": "+patchStrategy=merge\n+patchMergeKey=name", + "type": "array" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.StepStatus": { + "properties": { + "lastScaledAt": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "type": "string" + }, + "phase": { + "type": "string" + }, + "reason": { + "type": "string" + }, + "replicas": { + "type": "integer" + }, + "selector": { + "type": "string" + }, + "sinkStatuses": { + "additionalProperties": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SourceStatus" + }, + "type": "object" + }, + "sourceStatuses": { + "additionalProperties": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SourceStatus" + }, + "type": "object" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Storage": { + "properties": { + "name": { + 
"type": "string" + }, + "subPath": { + "title": "volume name", + "type": "string" + } + }, + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.TLS": { + "properties": { + "caCertSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "title": "CACertSecret refers to the secret that contains the CA cert" + }, + "certSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "title": "CertSecret refers to the secret that contains the cert" + }, + "keySecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "title": "KeySecret refers to the secret that contains the key" + } + }, + "type": "object" + }, "google.protobuf.Any": { "properties": { "type_url": { @@ -9321,6 +10220,37 @@ "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
       "type": "string"
     },
+    "io.k8s.apimachinery.pkg.apis.meta.v1.Condition": {
+      "description": "// other fields\n}",
+      "properties": {
+        "lastTransitionTime": {
+          "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time",
+          "title": "lastTransitionTime is the last time the condition transitioned from one status to another.\nThis should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.\n+required\n+kubebuilder:validation:Required\n+kubebuilder:validation:Type=string\n+kubebuilder:validation:Format=date-time"
+        },
+        "message": {
+          "title": "message is a human readable message indicating details about the transition.\nThis may be an empty string.\n+required\n+kubebuilder:validation:Required\n+kubebuilder:validation:MaxLength=32768",
+          "type": "string"
+        },
+        "observedGeneration": {
+          "title": "observedGeneration represents the .metadata.generation that the condition was set based upon.\nFor instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date\nwith respect to the current state of the instance.\n+optional\n+kubebuilder:validation:Minimum=0",
+          "type": "string"
+        },
+        "reason": {
+          "title": "reason contains a programmatic identifier indicating the reason for the condition's last transition.\nProducers of specific condition types may define expected values and meanings for this field,\nand whether the values are considered a guaranteed API.\nThe value should be a CamelCase string.\nThis field may not be empty.\n+required\n+kubebuilder:validation:Required\n+kubebuilder:validation:MaxLength=1024\n+kubebuilder:validation:MinLength=1\n+kubebuilder:validation:Pattern=`^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$`",
+          "type": "string"
+        },
+        "status": {
+          "title": "status of the condition, one of True, False, Unknown.\n+required\n+kubebuilder:validation:Required\n+kubebuilder:validation:Enum=True;False;Unknown",
+          "type": "string"
+        },
+        "type": {
+          "title": "type of condition in CamelCase or in foo.example.com/CamelCase.\n---\nMany .condition.type values are consistent across resources like Available, but because arbitrary conditions can be\nuseful (see .node.status.conditions), the ability to deconflict is important.\nThe regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)\n+required\n+kubebuilder:validation:Required\n+kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`\n+kubebuilder:validation:MaxLength=316",
+          "type": "string"
+        }
+      },
+      "title": "Condition contains details for one aspect of the current state of this API Resource.\n---\nThis struct is intended for direct use as an array at the field path .status.conditions. For example,\ntype FooStatus struct{\n // Represents the observations of a foo's current state.\n // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\"\n // +patchMergeKey=type\n // +patchStrategy=merge\n // +listType=map\n // +listMapKey=type\n Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`",
+      "type": "object"
+    },
     "io.k8s.apimachinery.pkg.apis.meta.v1.CreateOptions": {
       "description": "CreateOptions may be provided when creating an API object.",
       "properties": {
@@ -9338,6 +10268,15 @@
       },
       "type": "object"
     },
+    "io.k8s.apimachinery.pkg.apis.meta.v1.Duration": {
+      "description": "Duration is a wrapper around time.Duration which supports correct\nmarshaling to YAML and JSON. In particular, it marshals into strings, which\ncan be used as map keys in json.",
+      "properties": {
+        "duration": {
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1": {
       "description": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:\u003cname\u003e', where \u003cname\u003e is the name of a field in a struct, or key in a map 'v:\u003cvalue\u003e', where \u003cvalue\u003e is the exact json formatted value of a list item 'i:\u003cindex\u003e', where \u003cindex\u003e is position of a item in a list 'k:\u003ckeys\u003e', where \u003ckeys\u003e is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff",
       "type": "object"
@@ -9645,6 +10584,55 @@
     "io.k8s.apimachinery.pkg.util.intstr.IntOrString": {
       "type": "string"
     },
+    "pipeline.DeletePipelineResponse": {
+      "type": "object"
+    },
+    "pipeline.LogEntry": {
+      "properties": {
+        "msg": {
+          "type": "string"
+        },
+        "namespace": {
+          "type": "string"
+        },
+        "pipelineName": {
+          "type": "string"
+        },
+        "stepName": {
+          "type": "string"
+        },
+        "time": {
+          "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+        }
+      },
+      "title": "structured log entry",
+      "type": "object"
+    },
+    "pipeline.PipelineWatchEvent": {
+      "properties": {
+        "object": {
+          "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Pipeline"
+        },
+        "type": {
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "pipeline.RestartPipelineResponse": {
+      "type": "object"
+    },
+    "pipeline.StepWatchEvent": {
+      "properties": {
+        "object": {
+          "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Step"
+        },
+        "type": {
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "sensor.CreateSensorRequest": {
       "properties": {
         "createOptions": {
diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json
index fa847a9d4a13..70973fa71f73 100644
--- a/api/openapi-spec/swagger.json
+++ b/api/openapi-spec/swagger.json
@@ -1190,6 +1190,245 @@
         }
       }
     },
+    "/api/v1/pipelines/{namespace}": {
+      "get": {
+        "tags": [
+          "PipelineService"
+        ],
+        "operationId": "PipelineService_ListPipelines",
+        "parameters": [
+          {
+            "type": "string",
+            "name": "namespace",
+            "in": "path",
+            "required": true
+          },
+          {
+            "type": "string",
+            "description": "A selector to restrict the list of returned objects by their labels.\nDefaults to everything.\n+optional.",
+            "name": "listOptions.labelSelector",
+            "in": "query"
+          },
+          {
+            "type": "string",
+            "description": "A selector to restrict the list of returned objects by their fields.\nDefaults to everything.\n+optional.",
+            "name": "listOptions.fieldSelector",
+            "in": "query"
+          },
+          {
+            "type": "boolean",
+            "description": "Watch for changes to the described resources and return them as a stream of\nadd, update, and remove notifications. Specify resourceVersion.\n+optional.",
+            "name": "listOptions.watch",
+            "in": "query"
+          },
+          {
+            "type": "boolean",
+            "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\".\nServers that do not implement bookmarks may ignore this flag and\nbookmarks are sent at the server's discretion. Clients should not\nassume bookmarks are returned at any specific interval, nor may they\nassume the server will send any BOOKMARK event during a session.\nIf this is not a watch, this field is ignored.\nIf the feature gate WatchBookmarks is not enabled in apiserver,\nthis field is ignored.\n+optional.",
+            "name": "listOptions.allowWatchBookmarks",
+            "in": "query"
+          },
+          {
+            "type": "string",
+            "description": "resourceVersion sets a constraint on what resource versions a request may be served from.\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional",
+            "name": "listOptions.resourceVersion",
+            "in": "query"
+          },
+          {
+            "type": "string",
+            "description": "resourceVersionMatch determines how resourceVersion is applied to list calls.\nIt is highly recommended that resourceVersionMatch be set for list calls where\nresourceVersion is set\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional",
+            "name": "listOptions.resourceVersionMatch",
+            "in": "query"
+          },
+          {
+            "type": "string",
+            "format": "int64",
+            "description": "Timeout for the list/watch call.\nThis limits the duration of the call, regardless of any activity or inactivity.\n+optional.",
+            "name": "listOptions.timeoutSeconds",
+            "in": "query"
+          },
+          {
+            "type": "string",
+            "format": "int64",
+            "description": "limit is a maximum number of responses to return for a list call. If more items exist, the\nserver will set the `continue` field on the list metadata to a value that can be used with the\nsame initial query to retrieve the next set of results. Setting a limit may return fewer than\nthe requested amount of items (up to zero items) in the event all requested objects are\nfiltered out and clients should only use the presence of the continue field to determine whether\nmore results are available. Servers may choose not to support the limit argument and will return\nall of the available results. If limit is specified and the continue field is empty, clients may\nassume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing\na single list call without a limit - that is, no objects created, modified, or deleted after the\nfirst request is issued will be included in any subsequent continued requests. This is sometimes\nreferred to as a consistent snapshot, and ensures that a client that is using limit to receive\nsmaller chunks of a very large result can ensure they see all possible objects. If objects are\nupdated during a chunked list the version of the object that was present at the time the first list\nresult was calculated is returned.",
+            "name": "listOptions.limit",
+            "in": "query"
+          },
+          {
+            "type": "string",
+            "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.",
+            "name": "listOptions.continue",
+            "in": "query"
+          }
+        ],
+        "responses": {
+          "200": {
+            "description": "A successful response.",
+            "schema": {
+              "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.PipelineList"
+            }
+          },
+          "default": {
+            "description": "An unexpected error response.",
+            "schema": {
+              "$ref": "#/definitions/grpc.gateway.runtime.Error"
+            }
+          }
+        }
+      }
+    },
+    "/api/v1/pipelines/{namespace}/{name}": {
+      "get": {
+        "tags": [
+          "PipelineService"
+        ],
+        "operationId": "PipelineService_GetPipeline",
+        "parameters": [
+          {
+            "type": "string",
+            "name": "namespace",
+            "in": "path",
+            "required": true
+          },
+          {
+            "type": "string",
+            "name": "name",
+            "in": "path",
+            "required": true
+          },
+          {
+            "type": "string",
+            "description": "resourceVersion sets a constraint on what resource versions a request may be served from.\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional",
+            "name": "getOptions.resourceVersion",
+            "in": "query"
+          }
+        ],
+        "responses": {
+          "200": {
+            "description": "A successful response.",
+            "schema": {
+              "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Pipeline"
+            }
+          },
+          "default": {
+            "description": "An unexpected error response.",
+            "schema": {
+              "$ref": "#/definitions/grpc.gateway.runtime.Error"
+            }
+          }
+        }
+      },
+      "delete": {
+        "tags": [
+          "PipelineService"
+        ],
+        "operationId": "PipelineService_DeletePipeline",
+        "parameters": [
+          {
+            "type": "string",
+            "name": "namespace",
+            "in": "path",
+            "required": true
+          },
+          {
+            "type": "string",
+            "name": "name",
+            "in": "path",
+            "required": true
+          },
+          {
+            "type": "string",
+            "format": "int64",
+            "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer.\nThe value zero indicates delete immediately. If this value is nil, the default grace period for the\nspecified type will be used.\nDefaults to a per object value if not specified. zero means delete immediately.\n+optional.",
+            "name": "deleteOptions.gracePeriodSeconds",
+            "in": "query"
+          },
+          {
+            "type": "string",
+            "description": "Specifies the target UID.\n+optional.",
+            "name": "deleteOptions.preconditions.uid",
+            "in": "query"
+          },
+          {
+            "type": "string",
+            "description": "Specifies the target ResourceVersion\n+optional.",
+            "name": "deleteOptions.preconditions.resourceVersion",
+            "in": "query"
+          },
+          {
+            "type": "boolean",
+            "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.\nShould the dependent objects be orphaned. If true/false, the \"orphan\"\nfinalizer will be added to/removed from the object's finalizers list.\nEither this field or PropagationPolicy may be set, but not both.\n+optional.",
+            "name": "deleteOptions.orphanDependents",
+            "in": "query"
+          },
+          {
+            "type": "string",
+            "description": "Whether and how garbage collection will be performed.\nEither this field or OrphanDependents may be set, but not both.\nThe default policy is decided by the existing finalizer set in the\nmetadata.finalizers and the resource-specific default policy.\nAcceptable values are: 'Orphan' - orphan the dependents; 'Background' -\nallow the garbage collector to delete the dependents in the background;\n'Foreground' - a cascading policy that deletes all dependents in the\nforeground.\n+optional.",
+            "name": "deleteOptions.propagationPolicy",
+            "in": "query"
+          },
+          {
+            "type": "array",
+            "items": {
+              "type": "string"
+            },
+            "collectionFormat": "multi",
+            "description": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional.",
+            "name": "deleteOptions.dryRun",
+            "in": "query"
+          }
+        ],
+        "responses": {
+          "200": {
+            "description": "A successful response.",
+            "schema": {
+              "$ref": "#/definitions/pipeline.DeletePipelineResponse"
+            }
+          },
+          "default": {
+            "description": "An unexpected error response.",
+            "schema": {
+              "$ref": "#/definitions/grpc.gateway.runtime.Error"
+            }
+          }
+        }
+      }
+    },
+    "/api/v1/pipelines/{namespace}/{name}/restart": {
+      "post": {
+        "tags": [
+          "PipelineService"
+        ],
+        "operationId": "PipelineService_RestartPipeline",
+        "parameters": [
+          {
+            "type": "string",
+            "name": "namespace",
+            "in": "path",
+            "required": true
+          },
+          {
+            "type": "string",
+            "name": "name",
+            "in": "path",
+            "required": true
+          }
+        ],
+        "responses": {
+          "200": {
+            "description": "A successful response.",
+            "schema": {
+              "$ref": "#/definitions/pipeline.RestartPipelineResponse"
+            }
+          },
+          "default": {
+            "description": "An unexpected error response.",
+            "schema": {
+              "$ref": "#/definitions/grpc.gateway.runtime.Error"
+            }
+          }
+        }
+      }
+    },
     "/api/v1/sensors/{namespace}": {
       "get": {
         "tags": [
@@ -1789,12 +2028,12 @@
         }
       }
     },
-    "/api/v1/stream/sensors/{namespace}": {
+    "/api/v1/stream/pipelines/{namespace}": {
       "get": {
         "tags": [
-          "SensorService"
+          "PipelineService"
         ],
-        "operationId": "SensorService_WatchSensors",
+        "operationId": "PipelineService_WatchPipelines",
        "parameters": [
          {
            "type": "string",
@@ -1864,13 +2103,13 @@
            "description": "A successful response.(streaming responses)",
            "schema": {
              "type": "object",
-              "title": "Stream result of sensor.SensorWatchEvent",
+              "title": "Stream result of pipeline.PipelineWatchEvent",
              "properties": {
                "error": {
                  "$ref": "#/definitions/grpc.gateway.runtime.StreamError"
                },
                "result": {
"#/definitions/sensor.SensorWatchEvent" + "$ref": "#/definitions/pipeline.PipelineWatchEvent" } } } @@ -1884,12 +2123,12 @@ } } }, - "/api/v1/stream/sensors/{namespace}/logs": { + "/api/v1/stream/pipelines/{namespace}/logs": { "get": { "tags": [ - "SensorService" + "PipelineService" ], - "operationId": "SensorService_SensorsLogs", + "operationId": "PipelineService_PipelineLogs", "parameters": [ { "type": "string", @@ -1899,19 +2138,19 @@ }, { "type": "string", - "description": "optional - only return entries for this sensor name.", + "description": "optional - only return entries for this pipeline.", "name": "name", "in": "query" }, { "type": "string", - "description": "optional - only return entries for this trigger.", - "name": "triggerName", + "description": "optional - only return entries for this step.", + "name": "stepName", "in": "query" }, { "type": "string", - "description": "option - only return entries where `msg` contains this regular expressions.", + "description": "optional - only return entries which match this expresssion.", "name": "grep", "in": "query" }, @@ -1986,13 +2225,13 @@ "description": "A successful response.(streaming responses)", "schema": { "type": "object", - "title": "Stream result of sensor.LogEntry", + "title": "Stream result of pipeline.LogEntry", "properties": { "error": { "$ref": "#/definitions/grpc.gateway.runtime.StreamError" }, "result": { - "$ref": "#/definitions/sensor.LogEntry" + "$ref": "#/definitions/pipeline.LogEntry" } } } @@ -2006,62 +2245,18 @@ } } }, - "/api/v1/userinfo": { + "/api/v1/stream/sensors/{namespace}": { "get": { "tags": [ - "InfoService" + "SensorService" ], - "operationId": "InfoService_GetUserInfo", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.GetUserInfoResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/grpc.gateway.runtime.Error" - } - } - } - } - }, - "/api/v1/version": { - "get": { - "tags": [ - "InfoService" - ], - "operationId": "InfoService_GetVersion", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Version" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/grpc.gateway.runtime.Error" - } - } - } - } - }, - "/api/v1/workflow-event-bindings/{namespace}": { - "get": { - "tags": [ - "EventService" - ], - "operationId": "EventService_ListWorkflowEventBindings", - "parameters": [ - { - "type": "string", - "name": "namespace", - "in": "path", - "required": true + "operationId": "SensorService_WatchSensors", + "parameters": [ + { + "type": "string", + "name": "namespace", + "in": "path", + "required": true }, { "type": "string", @@ -2122,9 +2317,18 @@ ], "responses": { "200": { - "description": "A successful response.", + "description": "A successful response.(streaming responses)", "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowEventBindingList" + "type": "object", + "title": "Stream result of sensor.SensorWatchEvent", + "properties": { + "error": { + "$ref": "#/definitions/grpc.gateway.runtime.StreamError" + }, + "result": { + "$ref": "#/definitions/sensor.SensorWatchEvent" + } + } } }, "default": { @@ -2136,12 +2340,12 @@ } } }, - "/api/v1/workflow-events/{namespace}": { + "/api/v1/stream/sensors/{namespace}/logs": { "get": { "tags": [ - "WorkflowService" + "SensorService" ], - 
"operationId": "WorkflowService_WatchWorkflows", + "operationId": "SensorService_SensorsLogs", "parameters": [ { "type": "string", @@ -2151,63 +2355,85 @@ }, { "type": "string", - "description": "A selector to restrict the list of returned objects by their labels.\nDefaults to everything.\n+optional.", - "name": "listOptions.labelSelector", + "description": "optional - only return entries for this sensor name.", + "name": "name", "in": "query" }, { "type": "string", - "description": "A selector to restrict the list of returned objects by their fields.\nDefaults to everything.\n+optional.", - "name": "listOptions.fieldSelector", + "description": "optional - only return entries for this trigger.", + "name": "triggerName", "in": "query" }, { - "type": "boolean", - "description": "Watch for changes to the described resources and return them as a stream of\nadd, update, and remove notifications. Specify resourceVersion.\n+optional.", - "name": "listOptions.watch", + "type": "string", + "description": "option - only return entries where `msg` contains this regular expressions.", + "name": "grep", + "in": "query" + }, + { + "type": "string", + "description": "The container for which to stream logs. Defaults to only container if there is one container in the pod.\n+optional.", + "name": "podLogOptions.container", "in": "query" }, { "type": "boolean", - "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\".\nServers that do not implement bookmarks may ignore this flag and\nbookmarks are sent at the server's discretion. Clients should not\nassume bookmarks are returned at any specific interval, nor may they\nassume the server will send any BOOKMARK event during a session.\nIf this is not a watch, this field is ignored.\nIf the feature gate WatchBookmarks is not enabled in apiserver,\nthis field is ignored.\n+optional.", - "name": "listOptions.allowWatchBookmarks", + "description": "Follow the log stream of the pod. Defaults to false.\n+optional.", + "name": "podLogOptions.follow", "in": "query" }, { - "type": "string", - "description": "resourceVersion sets a constraint on what resource versions a request may be served from.\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional", - "name": "listOptions.resourceVersion", + "type": "boolean", + "description": "Return previous terminated container logs. Defaults to false.\n+optional.", + "name": "podLogOptions.previous", "in": "query" }, { "type": "string", - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls.\nIt is highly recommended that resourceVersionMatch be set for list calls where\nresourceVersion is set\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional", - "name": "listOptions.resourceVersionMatch", + "format": "int64", + "description": "A relative time in seconds before the current time from which to show logs. 
+            "description": "A relative time in seconds before the current time from which to show logs. If this value\nprecedes the time a pod was started, only logs since the pod start will be returned.\nIf this value is in the future, no logs will be returned.\nOnly one of sinceSeconds or sinceTime may be specified.\n+optional.",
+            "name": "podLogOptions.sinceSeconds",
            "in": "query"
          },
          {
            "type": "string",
            "format": "int64",
-            "description": "Timeout for the list/watch call.\nThis limits the duration of the call, regardless of any activity or inactivity.\n+optional.",
-            "name": "listOptions.timeoutSeconds",
+            "description": "Represents seconds of UTC time since Unix epoch\n1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to\n9999-12-31T23:59:59Z inclusive.",
+            "name": "podLogOptions.sinceTime.seconds",
+            "in": "query"
+          },
+          {
+            "type": "integer",
+            "format": "int32",
+            "description": "Non-negative fractions of a second at nanosecond resolution. Negative\nsecond values with fractions must still have non-negative nanos values\nthat count forward in time. Must be from 0 to 999,999,999\ninclusive. This field may be limited in precision depending on context.",
+            "name": "podLogOptions.sinceTime.nanos",
+            "in": "query"
+          },
+          {
+            "type": "boolean",
+            "description": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line\nof log output. Defaults to false.\n+optional.",
+            "name": "podLogOptions.timestamps",
            "in": "query"
          },
          {
            "type": "string",
            "format": "int64",
-            "description": "limit is a maximum number of responses to return for a list call. If more items exist, the\nserver will set the `continue` field on the list metadata to a value that can be used with the\nsame initial query to retrieve the next set of results. Setting a limit may return fewer than\nthe requested amount of items (up to zero items) in the event all requested objects are\nfiltered out and clients should only use the presence of the continue field to determine whether\nmore results are available. Servers may choose not to support the limit argument and will return\nall of the available results. If limit is specified and the continue field is empty, clients may\nassume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing\na single list call without a limit - that is, no objects created, modified, or deleted after the\nfirst request is issued will be included in any subsequent continued requests. This is sometimes\nreferred to as a consistent snapshot, and ensures that a client that is using limit to receive\nsmaller chunks of a very large result can ensure they see all possible objects. If objects are\nupdated during a chunked list the version of the object that was present at the time the first list\nresult was calculated is returned.",
-            "name": "listOptions.limit",
+            "description": "If set, the number of lines from the end of the logs to show. If not specified,\nlogs are shown from the creation of the container or sinceSeconds or sinceTime\n+optional.",
+            "name": "podLogOptions.tailLines",
            "in": "query"
          },
          {
            "type": "string",
-            "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.",
-            "name": "listOptions.continue",
+            "format": "int64",
+            "description": "If set, the number of bytes to read from the server before terminating the\nlog output. This may not display a complete final line of logging, and may return\nslightly more or slightly less than the specified limit.\n+optional.",
+            "name": "podLogOptions.limitBytes",
            "in": "query"
          },
          {
-            "type": "string",
-            "name": "fields",
+            "type": "boolean",
an attacker could not intercept\nthe actual log data coming from the real kubelet).\n+optional.", + "name": "podLogOptions.insecureSkipTLSVerifyBackend", "in": "query" } ], @@ -2216,13 +2442,13 @@ "description": "A successful response.(streaming responses)", "schema": { "type": "object", - "title": "Stream result of io.argoproj.workflow.v1alpha1.WorkflowWatchEvent", + "title": "Stream result of sensor.LogEntry", "properties": { "error": { "$ref": "#/definitions/grpc.gateway.runtime.StreamError" }, "result": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowWatchEvent" + "$ref": "#/definitions/sensor.LogEntry" } } } @@ -2236,12 +2462,12 @@ } } }, - "/api/v1/workflow-templates/{namespace}": { + "/api/v1/stream/steps/{namespace}": { "get": { "tags": [ - "WorkflowTemplateService" + "PipelineService" ], - "operationId": "WorkflowTemplateService_ListWorkflowTemplates", + "operationId": "PipelineService_WatchSteps", "parameters": [ { "type": "string", @@ -2308,9 +2534,18 @@ ], "responses": { "200": { - "description": "A successful response.", + "description": "A successful response.(streaming responses)", "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplateList" + "type": "object", + "title": "Stream result of pipeline.StepWatchEvent", + "properties": { + "error": { + "$ref": "#/definitions/grpc.gateway.runtime.StreamError" + }, + "result": { + "$ref": "#/definitions/pipeline.StepWatchEvent" + } + } } }, "default": { @@ -2320,33 +2555,19 @@ } } } - }, - "post": { + } + }, + "/api/v1/userinfo": { + "get": { "tags": [ - "WorkflowTemplateService" - ], - "operationId": "WorkflowTemplateService_CreateWorkflowTemplate", - "parameters": [ - { - "type": "string", - "name": "namespace", - "in": "path", - "required": true - }, - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplateCreateRequest" - } - } + "InfoService" ], + "operationId": "InfoService_GetUserInfo", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplate" + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.GetUserInfoResponse" } }, "default": { @@ -2358,33 +2579,17 @@ } } }, - "/api/v1/workflow-templates/{namespace}/lint": { - "post": { + "/api/v1/version": { + "get": { "tags": [ - "WorkflowTemplateService" - ], - "operationId": "WorkflowTemplateService_LintWorkflowTemplate", - "parameters": [ - { - "type": "string", - "name": "namespace", - "in": "path", - "required": true - }, - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplateLintRequest" - } - } + "InfoService" ], + "operationId": "InfoService_GetVersion", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplate" + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Version" } }, "default": { @@ -2396,12 +2601,12 @@ } } }, - "/api/v1/workflow-templates/{namespace}/{name}": { + "/api/v1/workflow-event-bindings/{namespace}": { "get": { "tags": [ - "WorkflowTemplateService" + "EventService" ], - "operationId": "WorkflowTemplateService_GetWorkflowTemplate", + "operationId": "EventService_ListWorkflowEventBindings", "parameters": [ { "type": "string", @@ -2411,65 +2616,66 @@ }, { "type": "string", - "name": "name", - "in": "path", - "required": true + "description": "A selector to 
restrict the list of returned objects by their labels.\nDefaults to everything.\n+optional.", + "name": "listOptions.labelSelector", + "in": "query" }, { "type": "string", - "description": "resourceVersion sets a constraint on what resource versions a request may be served from.\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional", - "name": "getOptions.resourceVersion", + "description": "A selector to restrict the list of returned objects by their fields.\nDefaults to everything.\n+optional.", + "name": "listOptions.fieldSelector", "in": "query" - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplate" - } }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/grpc.gateway.runtime.Error" - } - } - } - }, - "put": { - "tags": [ - "WorkflowTemplateService" - ], - "operationId": "WorkflowTemplateService_UpdateWorkflowTemplate", - "parameters": [ { - "type": "string", - "name": "namespace", - "in": "path", - "required": true + "type": "boolean", + "description": "Watch for changes to the described resources and return them as a stream of\nadd, update, and remove notifications. Specify resourceVersion.\n+optional.", + "name": "listOptions.watch", + "in": "query" }, { - "type": "string", - "description": "DEPRECATED: This field is ignored.", - "name": "name", - "in": "path", - "required": true + "type": "boolean", + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\".\nServers that do not implement bookmarks may ignore this flag and\nbookmarks are sent at the server's discretion. Clients should not\nassume bookmarks are returned at any specific interval, nor may they\nassume the server will send any BOOKMARK event during a session.\nIf this is not a watch, this field is ignored.\nIf the feature gate WatchBookmarks is not enabled in apiserver,\nthis field is ignored.\n+optional.", + "name": "listOptions.allowWatchBookmarks", + "in": "query" }, { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplateUpdateRequest" - } + "type": "string", + "description": "resourceVersion sets a constraint on what resource versions a request may be served from.\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional", + "name": "listOptions.resourceVersion", + "in": "query" + }, + { + "type": "string", + "description": "resourceVersionMatch determines how resourceVersion is applied to list calls.\nIt is highly recommended that resourceVersionMatch be set for list calls where\nresourceVersion is set\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional", + "name": "listOptions.resourceVersionMatch", + "in": "query" + }, + { + "type": "string", + "format": "int64", + "description": "Timeout for the list/watch call.\nThis limits the duration of the call, regardless of any activity or inactivity.\n+optional.", + "name": "listOptions.timeoutSeconds", + "in": "query" + }, + { + "type": "string", + "format": "int64", + "description": "limit is a maximum number of responses to return for a list call. 
If more items exist, the\nserver will set the `continue` field on the list metadata to a value that can be used with the\nsame initial query to retrieve the next set of results. Setting a limit may return fewer than\nthe requested amount of items (up to zero items) in the event all requested objects are\nfiltered out and clients should only use the presence of the continue field to determine whether\nmore results are available. Servers may choose not to support the limit argument and will return\nall of the available results. If limit is specified and the continue field is empty, clients may\nassume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing\na single list call without a limit - that is, no objects created, modified, or deleted after the\nfirst request is issued will be included in any subsequent continued requests. This is sometimes\nreferred to as a consistent snapshot, and ensures that a client that is using limit to receive\nsmaller chunks of a very large result can ensure they see all possible objects. If objects are\nupdated during a chunked list the version of the object that was present at the time the first list\nresult was calculated is returned.", + "name": "listOptions.limit", + "in": "query" + }, + { + "type": "string", + "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. 
Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.", + "name": "listOptions.continue", + "in": "query" } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplate" + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowEventBindingList" } }, "default": { @@ -2479,12 +2685,14 @@ } } } - }, - "delete": { + } + }, + "/api/v1/workflow-events/{namespace}": { + "get": { "tags": [ - "WorkflowTemplateService" + "WorkflowService" ], - "operationId": "WorkflowTemplateService_DeleteWorkflowTemplate", + "operationId": "WorkflowService_WatchWorkflows", "parameters": [ { "type": "string", @@ -2494,57 +2702,80 @@ }, { "type": "string", - "name": "name", - "in": "path", - "required": true + "description": "A selector to restrict the list of returned objects by their labels.\nDefaults to everything.\n+optional.", + "name": "listOptions.labelSelector", + "in": "query" }, { "type": "string", - "format": "int64", - "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer.\nThe value zero indicates delete immediately. If this value is nil, the default grace period for the\nspecified type will be used.\nDefaults to a per object value if not specified. zero means delete immediately.\n+optional.", - "name": "deleteOptions.gracePeriodSeconds", + "description": "A selector to restrict the list of returned objects by their fields.\nDefaults to everything.\n+optional.", + "name": "listOptions.fieldSelector", + "in": "query" + }, + { + "type": "boolean", + "description": "Watch for changes to the described resources and return them as a stream of\nadd, update, and remove notifications. Specify resourceVersion.\n+optional.", + "name": "listOptions.watch", + "in": "query" + }, + { + "type": "boolean", + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\".\nServers that do not implement bookmarks may ignore this flag and\nbookmarks are sent at the server's discretion. 
Clients should not\nassume bookmarks are returned at any specific interval, nor may they\nassume the server will send any BOOKMARK event during a session.\nIf this is not a watch, this field is ignored.\nIf the feature gate WatchBookmarks is not enabled in apiserver,\nthis field is ignored.\n+optional.", + "name": "listOptions.allowWatchBookmarks", "in": "query" }, { "type": "string", - "description": "Specifies the target UID.\n+optional.", - "name": "deleteOptions.preconditions.uid", + "description": "resourceVersion sets a constraint on what resource versions a request may be served from.\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional", + "name": "listOptions.resourceVersion", "in": "query" }, { "type": "string", - "description": "Specifies the target ResourceVersion\n+optional.", - "name": "deleteOptions.preconditions.resourceVersion", + "description": "resourceVersionMatch determines how resourceVersion is applied to list calls.\nIt is highly recommended that resourceVersionMatch be set for list calls where\nresourceVersion is set\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional", + "name": "listOptions.resourceVersionMatch", "in": "query" }, { - "type": "boolean", - "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.\nShould the dependent objects be orphaned. If true/false, the \"orphan\"\nfinalizer will be added to/removed from the object's finalizers list.\nEither this field or PropagationPolicy may be set, but not both.\n+optional.", - "name": "deleteOptions.orphanDependents", + "type": "string", + "format": "int64", + "description": "Timeout for the list/watch call.\nThis limits the duration of the call, regardless of any activity or inactivity.\n+optional.", + "name": "listOptions.timeoutSeconds", "in": "query" }, { "type": "string", - "description": "Whether and how garbage collection will be performed.\nEither this field or OrphanDependents may be set, but not both.\nThe default policy is decided by the existing finalizer set in the\nmetadata.finalizers and the resource-specific default policy.\nAcceptable values are: 'Orphan' - orphan the dependents; 'Background' -\nallow the garbage collector to delete the dependents in the background;\n'Foreground' - a cascading policy that deletes all dependents in the\nforeground.\n+optional.", - "name": "deleteOptions.propagationPolicy", + "format": "int64", + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the\nserver will set the `continue` field on the list metadata to a value that can be used with the\nsame initial query to retrieve the next set of results. Setting a limit may return fewer than\nthe requested amount of items (up to zero items) in the event all requested objects are\nfiltered out and clients should only use the presence of the continue field to determine whether\nmore results are available. Servers may choose not to support the limit argument and will return\nall of the available results. If limit is specified and the continue field is empty, clients may\nassume that no more results are available. 
This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing\na single list call without a limit - that is, no objects created, modified, or deleted after the\nfirst request is issued will be included in any subsequent continued requests. This is sometimes\nreferred to as a consistent snapshot, and ensures that a client that is using limit to receive\nsmaller chunks of a very large result can ensure they see all possible objects. If objects are\nupdated during a chunked list the version of the object that was present at the time the first list\nresult was calculated is returned.", + "name": "listOptions.limit", "in": "query" }, { - "type": "array", - "items": { - "type": "string" - }, - "collectionFormat": "multi", - "description": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional.", - "name": "deleteOptions.dryRun", + "type": "string", + "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.", + "name": "listOptions.continue", + "in": "query" + }, + { + "type": "string", + "name": "fields", "in": "query" } ], "responses": { "200": { - "description": "A successful response.", + "description": "A successful response.(streaming responses)", "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplateDeleteResponse" + "type": "object", + "title": "Stream result of io.argoproj.workflow.v1alpha1.WorkflowWatchEvent", + "properties": { + "error": { + "$ref": "#/definitions/grpc.gateway.runtime.StreamError" + }, + "result": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowWatchEvent" + } + } } }, "default": { @@ -2556,12 +2787,12 @@ } } }, - "/api/v1/workflows/{namespace}": { + "/api/v1/workflow-templates/{namespace}": { "get": { "tags": [ - "WorkflowService" + "WorkflowTemplateService" ], - "operationId": "WorkflowService_ListWorkflows", + "operationId": "WorkflowTemplateService_ListWorkflowTemplates", "parameters": [ { "type": "string", @@ -2624,19 +2855,13 @@ "description": "The continue option should be set when retrieving more results from the server. 
Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.", "name": "listOptions.continue", "in": "query" - }, - { - "type": "string", - "description": "Fields to be included or excluded in the response. e.g. \"items.spec,items.status.phase\", \"-items.status.nodes\".", - "name": "fields", - "in": "query" } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowList" + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplateList" } }, "default": { @@ -2649,9 +2874,9 @@ }, "post": { "tags": [ - "WorkflowService" + "WorkflowTemplateService" ], - "operationId": "WorkflowService_CreateWorkflow", + "operationId": "WorkflowTemplateService_CreateWorkflowTemplate", "parameters": [ { "type": "string", @@ -2664,7 +2889,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowCreateRequest" + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplateCreateRequest" } } ], @@ -2672,7 +2897,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow" + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplate" } }, "default": { @@ -2684,12 +2909,12 @@ } } }, - "/api/v1/workflows/{namespace}/lint": { + "/api/v1/workflow-templates/{namespace}/lint": { "post": { "tags": [ - "WorkflowService" + "WorkflowTemplateService" ], - "operationId": "WorkflowService_LintWorkflow", + "operationId": "WorkflowTemplateService_LintWorkflowTemplate", "parameters": [ { "type": "string", @@ -2702,7 +2927,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowLintRequest" + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplateLintRequest" } } ], @@ -2710,7 +2935,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow" + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplate" } }, "default": { @@ -2722,12 +2947,12 @@ } } }, - "/api/v1/workflows/{namespace}/submit": { - "post": { + "/api/v1/workflow-templates/{namespace}/{name}": { + "get": { "tags": [ - "WorkflowService" + "WorkflowTemplateService" ], - "operationId": "WorkflowService_SubmitWorkflow", + "operationId": "WorkflowTemplateService_GetWorkflowTemplate", "parameters": [ { "type": 
"string", @@ -2736,19 +2961,23 @@ "required": true }, { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowSubmitRequest" - } + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "resourceVersion sets a constraint on what resource versions a request may be served from.\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional", + "name": "getOptions.resourceVersion", + "in": "query" } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow" + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplate" } }, "default": { @@ -2758,14 +2987,12 @@ } } } - } - }, - "/api/v1/workflows/{namespace}/{name}": { - "get": { + }, + "put": { "tags": [ - "WorkflowService" + "WorkflowTemplateService" ], - "operationId": "WorkflowService_GetWorkflow", + "operationId": "WorkflowTemplateService_UpdateWorkflowTemplate", "parameters": [ { "type": "string", @@ -2775,28 +3002,25 @@ }, { "type": "string", + "description": "DEPRECATED: This field is ignored.", "name": "name", "in": "path", "required": true }, { - "type": "string", - "description": "resourceVersion sets a constraint on what resource versions a request may be served from.\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional", - "name": "getOptions.resourceVersion", - "in": "query" - }, - { - "type": "string", - "description": "Fields to be included or excluded in the response. e.g. \"spec,status.phase\", \"-status.nodes\".", - "name": "fields", - "in": "query" + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplateUpdateRequest" + } } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow" + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplate" } }, "default": { @@ -2809,9 +3033,9 @@ }, "delete": { "tags": [ - "WorkflowService" + "WorkflowTemplateService" ], - "operationId": "WorkflowService_DeleteWorkflow", + "operationId": "WorkflowTemplateService_DeleteWorkflowTemplate", "parameters": [ { "type": "string", @@ -2871,7 +3095,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowDeleteResponse" + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplateDeleteResponse" } }, "default": { @@ -2883,12 +3107,12 @@ } } }, - "/api/v1/workflows/{namespace}/{name}/log": { + "/api/v1/workflows/{namespace}": { "get": { "tags": [ "WorkflowService" ], - "operationId": "WorkflowService_WorkflowLogs", + "operationId": "WorkflowService_ListWorkflows", "parameters": [ { "type": "string", @@ -2898,100 +3122,72 @@ }, { "type": "string", - "name": "name", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "podName", + "description": "A selector to restrict the list of returned objects by their labels.\nDefaults to everything.\n+optional.", + "name": "listOptions.labelSelector", "in": "query" }, { "type": "string", - "description": "The container for which to stream logs. 
Defaults to only container if there is one container in the pod.\n+optional.", - "name": "logOptions.container", + "description": "A selector to restrict the list of returned objects by their fields.\nDefaults to everything.\n+optional.", + "name": "listOptions.fieldSelector", "in": "query" }, { "type": "boolean", - "description": "Follow the log stream of the pod. Defaults to false.\n+optional.", - "name": "logOptions.follow", + "description": "Watch for changes to the described resources and return them as a stream of\nadd, update, and remove notifications. Specify resourceVersion.\n+optional.", + "name": "listOptions.watch", "in": "query" }, { "type": "boolean", - "description": "Return previous terminated container logs. Defaults to false.\n+optional.", - "name": "logOptions.previous", + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\".\nServers that do not implement bookmarks may ignore this flag and\nbookmarks are sent at the server's discretion. Clients should not\nassume bookmarks are returned at any specific interval, nor may they\nassume the server will send any BOOKMARK event during a session.\nIf this is not a watch, this field is ignored.\nIf the feature gate WatchBookmarks is not enabled in apiserver,\nthis field is ignored.\n+optional.", + "name": "listOptions.allowWatchBookmarks", "in": "query" }, { "type": "string", - "format": "int64", - "description": "A relative time in seconds before the current time from which to show logs. If this value\nprecedes the time a pod was started, only logs since the pod start will be returned.\nIf this value is in the future, no logs will be returned.\nOnly one of sinceSeconds or sinceTime may be specified.\n+optional.", - "name": "logOptions.sinceSeconds", + "description": "resourceVersion sets a constraint on what resource versions a request may be served from.\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional", + "name": "listOptions.resourceVersion", "in": "query" }, { "type": "string", - "format": "int64", - "description": "Represents seconds of UTC time since Unix epoch\n1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to\n9999-12-31T23:59:59Z inclusive.", - "name": "logOptions.sinceTime.seconds", + "description": "resourceVersionMatch determines how resourceVersion is applied to list calls.\nIt is highly recommended that resourceVersionMatch be set for list calls where\nresourceVersion is set\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional", + "name": "listOptions.resourceVersionMatch", "in": "query" }, { - "type": "integer", - "format": "int32", - "description": "Non-negative fractions of a second at nanosecond resolution. Negative\nsecond values with fractions must still have non-negative nanos values\nthat count forward in time. Must be from 0 to 999,999,999\ninclusive. This field may be limited in precision depending on context.", - "name": "logOptions.sinceTime.nanos", - "in": "query" - }, - { - "type": "boolean", - "description": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line\nof log output. 
Defaults to false.\n+optional.", - "name": "logOptions.timestamps", + "type": "string", + "format": "int64", + "description": "Timeout for the list/watch call.\nThis limits the duration of the call, regardless of any activity or inactivity.\n+optional.", + "name": "listOptions.timeoutSeconds", "in": "query" }, { "type": "string", "format": "int64", - "description": "If set, the number of lines from the end of the logs to show. If not specified,\nlogs are shown from the creation of the container or sinceSeconds or sinceTime\n+optional.", - "name": "logOptions.tailLines", + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the\nserver will set the `continue` field on the list metadata to a value that can be used with the\nsame initial query to retrieve the next set of results. Setting a limit may return fewer than\nthe requested amount of items (up to zero items) in the event all requested objects are\nfiltered out and clients should only use the presence of the continue field to determine whether\nmore results are available. Servers may choose not to support the limit argument and will return\nall of the available results. If limit is specified and the continue field is empty, clients may\nassume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing\na single list call without a limit - that is, no objects created, modified, or deleted after the\nfirst request is issued will be included in any subsequent continued requests. This is sometimes\nreferred to as a consistent snapshot, and ensures that a client that is using limit to receive\nsmaller chunks of a very large result can ensure they see all possible objects. If objects are\nupdated during a chunked list the version of the object that was present at the time the first list\nresult was calculated is returned.", + "name": "listOptions.limit", "in": "query" }, { "type": "string", - "format": "int64", - "description": "If set, the number of bytes to read from the server before terminating the\nlog output. This may not display a complete final line of logging, and may return\nslightly more or slightly less than the specified limit.\n+optional.", - "name": "logOptions.limitBytes", - "in": "query" - }, - { - "type": "boolean", - "description": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the\nserving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver\nand the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real\nkubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the\nconnection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept\nthe actual log data coming from the real kubelet).\n+optional.", - "name": "logOptions.insecureSkipTLSVerifyBackend", + "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. 
If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.", + "name": "listOptions.continue", "in": "query" }, { "type": "string", - "name": "grep", + "description": "Fields to be included or excluded in the response. e.g. \"items.spec,items.status.phase\", \"-items.status.nodes\".", + "name": "fields", "in": "query" } ], "responses": { "200": { - "description": "A successful response.(streaming responses)", + "description": "A successful response.", "schema": { - "type": "object", - "title": "Stream result of io.argoproj.workflow.v1alpha1.LogEntry", - "properties": { - "error": { - "$ref": "#/definitions/grpc.gateway.runtime.StreamError" - }, - "result": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.LogEntry" - } - } + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowList" } }, "default": { @@ -3001,14 +3197,12 @@ } } } - } - }, - "/api/v1/workflows/{namespace}/{name}/resubmit": { - "put": { + }, + "post": { "tags": [ "WorkflowService" ], - "operationId": "WorkflowService_ResubmitWorkflow", + "operationId": "WorkflowService_CreateWorkflow", "parameters": [ { "type": "string", @@ -3016,18 +3210,12 @@ "in": "path", "required": true }, - { - "type": "string", - "name": "name", - "in": "path", - "required": true - }, { "name": "body", "in": "body", "required": true, "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowResubmitRequest" + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowCreateRequest" } } ], @@ -3047,12 +3235,12 @@ } } }, - "/api/v1/workflows/{namespace}/{name}/resume": { - "put": { + "/api/v1/workflows/{namespace}/lint": { + "post": { "tags": [ "WorkflowService" ], - "operationId": "WorkflowService_ResumeWorkflow", + "operationId": "WorkflowService_LintWorkflow", "parameters": [ { "type": "string", @@ -3060,18 +3248,12 @@ "in": "path", "required": true }, - { - "type": "string", - "name": "name", - "in": "path", - "required": true - }, { "name": "body", "in": "body", "required": true, "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowResumeRequest" + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowLintRequest" } } ], @@ -3091,12 +3273,12 @@ } } }, - "/api/v1/workflows/{namespace}/{name}/retry": { - "put": { + "/api/v1/workflows/{namespace}/submit": { + "post": { "tags": [ "WorkflowService" ], - "operationId": "WorkflowService_RetryWorkflow", + "operationId": "WorkflowService_SubmitWorkflow", "parameters": [ { "type": "string", @@ -3104,18 +3286,12 @@ "in": "path", "required": true }, - { - "type": "string", - "name": "name", - "in": "path", - "required": true - }, { "name": "body", "in": "body", "required": true, "schema": { - "$ref": 
"#/definitions/io.argoproj.workflow.v1alpha1.WorkflowRetryRequest" + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowSubmitRequest" } } ], @@ -3135,12 +3311,12 @@ } } }, - "/api/v1/workflows/{namespace}/{name}/set": { - "put": { + "/api/v1/workflows/{namespace}/{name}": { + "get": { "tags": [ "WorkflowService" ], - "operationId": "WorkflowService_SetWorkflow", + "operationId": "WorkflowService_GetWorkflow", "parameters": [ { "type": "string", @@ -3155,12 +3331,16 @@ "required": true }, { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowSetRequest" - } + "type": "string", + "description": "resourceVersion sets a constraint on what resource versions a request may be served from.\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional", + "name": "getOptions.resourceVersion", + "in": "query" + }, + { + "type": "string", + "description": "Fields to be included or excluded in the response. e.g. \"spec,status.phase\", \"-status.nodes\".", + "name": "fields", + "in": "query" } ], "responses": { @@ -3177,14 +3357,12 @@ } } } - } - }, - "/api/v1/workflows/{namespace}/{name}/stop": { - "put": { + }, + "delete": { "tags": [ "WorkflowService" ], - "operationId": "WorkflowService_StopWorkflow", + "operationId": "WorkflowService_DeleteWorkflow", "parameters": [ { "type": "string", @@ -3198,108 +3376,53 @@ "in": "path", "required": true }, - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowStopRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/grpc.gateway.runtime.Error" - } - } - } - } - }, - "/api/v1/workflows/{namespace}/{name}/suspend": { - "put": { - "tags": [ - "WorkflowService" - ], - "operationId": "WorkflowService_SuspendWorkflow", - "parameters": [ { "type": "string", - "name": "namespace", - "in": "path", - "required": true + "format": "int64", + "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer.\nThe value zero indicates delete immediately. If this value is nil, the default grace period for the\nspecified type will be used.\nDefaults to a per object value if not specified. 
zero means delete immediately.\n+optional.", + "name": "deleteOptions.gracePeriodSeconds", + "in": "query" }, { "type": "string", - "name": "name", - "in": "path", - "required": true + "description": "Specifies the target UID.\n+optional.", + "name": "deleteOptions.preconditions.uid", + "in": "query" }, { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowSuspendRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow" - } + "type": "string", + "description": "Specifies the target ResourceVersion\n+optional.", + "name": "deleteOptions.preconditions.resourceVersion", + "in": "query" }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/grpc.gateway.runtime.Error" - } - } - } - } - }, - "/api/v1/workflows/{namespace}/{name}/terminate": { - "put": { - "tags": [ - "WorkflowService" - ], - "operationId": "WorkflowService_TerminateWorkflow", - "parameters": [ { - "type": "string", - "name": "namespace", - "in": "path", - "required": true + "type": "boolean", + "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.\nShould the dependent objects be orphaned. If true/false, the \"orphan\"\nfinalizer will be added to/removed from the object's finalizers list.\nEither this field or PropagationPolicy may be set, but not both.\n+optional.", + "name": "deleteOptions.orphanDependents", + "in": "query" }, { "type": "string", - "name": "name", - "in": "path", - "required": true + "description": "Whether and how garbage collection will be performed.\nEither this field or OrphanDependents may be set, but not both.\nThe default policy is decided by the existing finalizer set in the\nmetadata.finalizers and the resource-specific default policy.\nAcceptable values are: 'Orphan' - orphan the dependents; 'Background' -\nallow the garbage collector to delete the dependents in the background;\n'Foreground' - a cascading policy that deletes all dependents in the\nforeground.\n+optional.", + "name": "deleteOptions.propagationPolicy", + "in": "query" }, { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTerminateRequest" - } + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi", + "description": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional.", + "name": "deleteOptions.dryRun", + "in": "query" } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow" + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowDeleteResponse" } }, "default": { @@ -3311,13 +3434,12 @@ } } }, - "/api/v1/workflows/{namespace}/{name}/{podName}/log": { + "/api/v1/workflows/{namespace}/{name}/log": { "get": { "tags": [ "WorkflowService" ], - "summary": "DEPRECATED: Cannot work via HTTP if podName is an empty string. 
Use WorkflowLogs.", - "operationId": "WorkflowService_PodLogs", + "operationId": "WorkflowService_WorkflowLogs", "parameters": [ { "type": "string", @@ -3334,8 +3456,7 @@ { "type": "string", "name": "podName", - "in": "path", - "required": true + "in": "query" }, { "type": "string", @@ -3432,74 +3553,1403 @@ } } } - } - }, - "definitions": { - "eventsource.CreateEventSourceRequest": { + }, + "/api/v1/workflows/{namespace}/{name}/resubmit": { + "put": { + "tags": [ + "WorkflowService" + ], + "operationId": "WorkflowService_ResubmitWorkflow", + "parameters": [ + { + "type": "string", + "name": "namespace", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowResubmitRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/grpc.gateway.runtime.Error" + } + } + } + } + }, + "/api/v1/workflows/{namespace}/{name}/resume": { + "put": { + "tags": [ + "WorkflowService" + ], + "operationId": "WorkflowService_ResumeWorkflow", + "parameters": [ + { + "type": "string", + "name": "namespace", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowResumeRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/grpc.gateway.runtime.Error" + } + } + } + } + }, + "/api/v1/workflows/{namespace}/{name}/retry": { + "put": { + "tags": [ + "WorkflowService" + ], + "operationId": "WorkflowService_RetryWorkflow", + "parameters": [ + { + "type": "string", + "name": "namespace", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowRetryRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/grpc.gateway.runtime.Error" + } + } + } + } + }, + "/api/v1/workflows/{namespace}/{name}/set": { + "put": { + "tags": [ + "WorkflowService" + ], + "operationId": "WorkflowService_SetWorkflow", + "parameters": [ + { + "type": "string", + "name": "namespace", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowSetRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow" + } + }, + "default": { + "description": "An unexpected error response.", + 
"schema": { + "$ref": "#/definitions/grpc.gateway.runtime.Error" + } + } + } + } + }, + "/api/v1/workflows/{namespace}/{name}/stop": { + "put": { + "tags": [ + "WorkflowService" + ], + "operationId": "WorkflowService_StopWorkflow", + "parameters": [ + { + "type": "string", + "name": "namespace", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowStopRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/grpc.gateway.runtime.Error" + } + } + } + } + }, + "/api/v1/workflows/{namespace}/{name}/suspend": { + "put": { + "tags": [ + "WorkflowService" + ], + "operationId": "WorkflowService_SuspendWorkflow", + "parameters": [ + { + "type": "string", + "name": "namespace", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowSuspendRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/grpc.gateway.runtime.Error" + } + } + } + } + }, + "/api/v1/workflows/{namespace}/{name}/terminate": { + "put": { + "tags": [ + "WorkflowService" + ], + "operationId": "WorkflowService_TerminateWorkflow", + "parameters": [ + { + "type": "string", + "name": "namespace", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTerminateRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/grpc.gateway.runtime.Error" + } + } + } + } + }, + "/api/v1/workflows/{namespace}/{name}/{podName}/log": { + "get": { + "tags": [ + "WorkflowService" + ], + "summary": "DEPRECATED: Cannot work via HTTP if podName is an empty string. Use WorkflowLogs.", + "operationId": "WorkflowService_PodLogs", + "parameters": [ + { + "type": "string", + "name": "namespace", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "podName", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The container for which to stream logs. Defaults to only container if there is one container in the pod.\n+optional.", + "name": "logOptions.container", + "in": "query" + }, + { + "type": "boolean", + "description": "Follow the log stream of the pod. Defaults to false.\n+optional.", + "name": "logOptions.follow", + "in": "query" + }, + { + "type": "boolean", + "description": "Return previous terminated container logs. 
Defaults to false.\n+optional.", + "name": "logOptions.previous", + "in": "query" + }, + { + "type": "string", + "format": "int64", + "description": "A relative time in seconds before the current time from which to show logs. If this value\nprecedes the time a pod was started, only logs since the pod start will be returned.\nIf this value is in the future, no logs will be returned.\nOnly one of sinceSeconds or sinceTime may be specified.\n+optional.", + "name": "logOptions.sinceSeconds", + "in": "query" + }, + { + "type": "string", + "format": "int64", + "description": "Represents seconds of UTC time since Unix epoch\n1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to\n9999-12-31T23:59:59Z inclusive.", + "name": "logOptions.sinceTime.seconds", + "in": "query" + }, + { + "type": "integer", + "format": "int32", + "description": "Non-negative fractions of a second at nanosecond resolution. Negative\nsecond values with fractions must still have non-negative nanos values\nthat count forward in time. Must be from 0 to 999,999,999\ninclusive. This field may be limited in precision depending on context.", + "name": "logOptions.sinceTime.nanos", + "in": "query" + }, + { + "type": "boolean", + "description": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line\nof log output. Defaults to false.\n+optional.", + "name": "logOptions.timestamps", + "in": "query" + }, + { + "type": "string", + "format": "int64", + "description": "If set, the number of lines from the end of the logs to show. If not specified,\nlogs are shown from the creation of the container or sinceSeconds or sinceTime\n+optional.", + "name": "logOptions.tailLines", + "in": "query" + }, + { + "type": "string", + "format": "int64", + "description": "If set, the number of bytes to read from the server before terminating the\nlog output. This may not display a complete final line of logging, and may return\nslightly more or slightly less than the specified limit.\n+optional.", + "name": "logOptions.limitBytes", + "in": "query" + }, + { + "type": "boolean", + "description": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the\nserving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver\nand the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real\nkubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the\nconnection to the real kubelet is vulnerable to a man in the middle attack (e.g. 
an attacker could not intercept\nthe actual log data coming from the real kubelet).\n+optional.", + "name": "logOptions.insecureSkipTLSVerifyBackend", + "in": "query" + }, + { + "type": "string", + "name": "grep", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.(streaming responses)", + "schema": { + "type": "object", + "title": "Stream result of io.argoproj.workflow.v1alpha1.LogEntry", + "properties": { + "error": { + "$ref": "#/definitions/grpc.gateway.runtime.StreamError" + }, + "result": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.LogEntry" + } + } + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/grpc.gateway.runtime.Error" + } + } + } + } + } + }, + "definitions": { + "eventsource.CreateEventSourceRequest": { + "type": "object", + "properties": { + "eventSource": { + "$ref": "#/definitions/io.argoproj.events.v1alpha1.EventSource" + }, + "namespace": { + "type": "string" + } + } + }, + "eventsource.EventSourceDeletedResponse": { + "type": "object" + }, + "eventsource.EventSourceWatchEvent": { + "type": "object", + "properties": { + "object": { + "$ref": "#/definitions/io.argoproj.events.v1alpha1.EventSource" + }, + "type": { + "type": "string" + } + } + }, + "eventsource.LogEntry": { + "type": "object", + "title": "structured log entry", + "properties": { + "eventName": { + "type": "string", + "title": "optional - the event name (e.g. `example`)" + }, + "eventSourceName": { + "type": "string" + }, + "eventSourceType": { + "type": "string", + "title": "optional - the event source type (e.g. `webhook`)" + }, + "level": { + "type": "string" + }, + "msg": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "time": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + } + } + }, + "eventsource.UpdateEventSourceRequest": { + "type": "object", + "properties": { + "eventSource": { + "$ref": "#/definitions/io.argoproj.events.v1alpha1.EventSource" + }, + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AWSCredentials": { + "type": "object", + "properties": { + "accessKeyId": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "secretAccessKey": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AWSEndpoint": { + "type": "object", + "properties": { + "url": { + "type": "string" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Backoff": { + "type": "object", + "properties": { + "FactorPercentage": { + "type": "integer", + "title": "+kubebuilder:default=200" + }, + "cap": { + "title": "+kubebuilder:default=\"0ms\"", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration" + }, + "duration": { + "title": "+kubebuilder:default=\"100ms\"", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration" + }, + "jitterPercentage": { + "type": "integer", + "title": "the amount of jitter per step, typically 10-20%, \u003e100% is valid, but strange\n+kubebuilder:default=10" + }, + "steps": { + "type": "string", + "format": "uint64", + "title": "the number of backoff steps, zero means no retries\n+kubebuilder:default=20" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Cat": { + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Code": { + "type": "object", + "properties": { + 
"runtime": { + "type": "string" + }, + "source": { + "type": "string" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Container": { + "type": "object", + "properties": { + "args": { + "type": "array", + "items": { + "type": "string" + } + }, + "command": { + "type": "array", + "items": { + "type": "string" + } + }, + "env": { + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EnvVar" + } + }, + "image": { + "type": "string" + }, + "in": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Interface" + }, + "resources": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" + }, + "volumeMounts": { + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.VolumeMount" + } + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Cron": { + "type": "object", + "properties": { + "layout": { + "type": "string", + "title": "+kubebuilder:default=\"2006-01-02T15:04:05Z07:00\"" + }, + "schedule": { + "type": "string" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBDataSource": { + "type": "object", + "properties": { + "value": { + "type": "string" + }, + "valueFrom": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBDataSourceFrom" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBDataSourceFrom": { + "type": "object", + "properties": { + "secretKeyRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBSink": { + "type": "object", + "properties": { + "actions": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLAction" + } + }, + "database": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Database" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBSource": { + "type": "object", + "properties": { + "commitInterval": { + "title": "+kubebuilder:default=\"5s\"", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration" + }, + "database": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Database" + }, + "initSchema": { + "type": "boolean", + "title": "+kubebuilder:default=true" + }, + "offsetColumn": { + "type": "string" + }, + "pollInterval": { + "title": "+kubebuilder:default=\"1s\"", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration" + }, + "query": { + "type": "string" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Database": { + "type": "object", + "properties": { + "dataSource": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBDataSource" + }, + "driver": { + "type": "string", + "title": "+kubebuilder:default=default" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Dedupe": { + "type": "object", + "properties": { + "maxSize": { + "title": "MaxSize is the maximum number of entries to keep in the in-memory database used to store recent UIDs.\nLarger number mean bigger windows of time for dedupe, but greater memory usage.\n+kubebuilder:default=\"1M\"", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "uid": { + "type": "string", + "title": "+kubebuilder:default=\"sha1(msg)\"" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Expand": { + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Flatten": { + "type": 
"object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Git": { + "type": "object", + "properties": { + "branch": { + "type": "string", + "title": "+kubebuilder:default=main" + }, + "command": { + "type": "array", + "items": { + "type": "string" + } + }, + "env": { + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EnvVar" + } + }, + "image": { + "type": "string" + }, + "passwordSecret": { + "title": "PasswordSecret is the secret selector to the repository password", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "path": { + "description": "+kubebuilder:default=.", + "type": "string" + }, + "sshPrivateKeySecret": { + "title": "SSHPrivateKeySecret is the secret selector to the repository ssh private key", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "url": { + "type": "string" + }, + "usernameSecret": { + "title": "UsernameSecret is the secret selector to the repository username", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Group": { + "type": "object", + "properties": { + "endOfGroup": { + "type": "string" + }, + "format": { + "type": "string" + }, + "key": { + "type": "string" + }, + "storage": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Storage" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTP": { + "type": "object" + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPHeader": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + }, + "valueFrom": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPHeaderSource" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPHeaderSource": { + "type": "object", + "properties": { + "secretKeyRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPSink": { + "type": "object", + "properties": { + "headers": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPHeader" + } + }, + "url": { + "type": "string" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPSource": { + "type": "object", + "properties": { + "serviceName": { + "type": "string" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Interface": { + "type": "object", + "properties": { + "fifo": { + "type": "boolean" + }, + "http": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTP" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Kafka": { + "type": "object", + "properties": { + "kafkaConfig": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaConfig" + }, + "name": { + "type": "string", + "title": "+kubebuilder:default=default" + }, + "topic": { + "type": "string" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaConfig": { + "type": "object", + "properties": { + "brokers": { + "type": "array", + "items": { + "type": "string" + } + }, + "net": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaNET" + }, + "version": { + "type": "string" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaNET": { + "type": "object", + "properties": { + "sasl": { + "$ref": 
"#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SASL" + }, + "tls": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.TLS" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaSink": { + "type": "object", + "properties": { + "async": { + "type": "boolean" + }, + "kafka": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Kafka" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaSource": { + "type": "object", + "properties": { + "kafka": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Kafka" + }, + "startOffset": { + "type": "string", + "title": "+kubebuilder:default=Last" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Log": { + "type": "object", + "properties": { + "truncate": { + "type": "string", + "format": "uint64" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Metadata": { + "type": "object", + "properties": { + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "labels": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Metrics": { + "type": "object", + "properties": { + "errors": { + "type": "string", + "format": "uint64" + }, + "rate": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "retries": { + "type": "string", + "format": "uint64", + "title": "current rate of messages per second" + }, + "total": { + "type": "string", + "format": "uint64" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Pipeline": { + "type": "object", + "title": "+kubebuilder:object:root=true\n+kubebuilder:resource:shortName=pl\n+kubebuilder:subresource:status\n+kubebuilder:printcolumn:name=\"Phase\",type=string,JSONPath=`.status.phase`\n+kubebuilder:printcolumn:name=\"Message\",type=string,JSONPath=`.status.message`", + "properties": { + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.PipelineSpec" + }, + "status": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.PipelineStatus" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.PipelineList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Pipeline" + } + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.PipelineSpec": { + "type": "object", + "properties": { + "steps": { + "type": "array", + "title": "+patchStrategy=merge\n+patchMergeKey=name", + "items": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.StepSpec" + } + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.PipelineStatus": { + "type": "object", + "properties": { + "conditions": { + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" + } + }, + "lastUpdated": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "type": "string" + }, + "phase": { + "type": "string" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3": { + "type": "object", + "properties": { + "bucket": { + 
"type": "string" + }, + "credentials": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AWSCredentials" + }, + "endpoint": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AWSEndpoint" + }, + "name": { + "type": "string", + "title": "+kubebuilder:default=default" + }, + "region": { + "type": "string" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3Sink": { + "type": "object", + "properties": { + "s3": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3Source": { + "type": "object", + "properties": { + "pollPeriod": { + "title": "+kubebuilder:default=\"1m\"", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration" + }, + "s3": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SASL": { + "type": "object", + "properties": { + "mechanism": { + "type": "string", + "title": "SASLMechanism is the name of the enabled SASL mechanism.\nPossible values: OAUTHBEARER, PLAIN (defaults to PLAIN).\n+optional" + }, + "password": { + "title": "Password for SASL/PLAIN authentication", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "user": { + "title": "User is the authentication identity (authcid) to present for\nSASL/PLAIN or SASL/SCRAM authentication", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLAction": { + "type": "object", + "properties": { + "onError": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLStatement" + }, + "onRecordNotFound": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLStatement" + }, + "statement": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLStatement" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLStatement": { + "type": "object", + "properties": { + "args": { + "type": "array", + "items": { + "type": "string" + } + }, + "sql": { + "type": "string" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.STAN": { + "type": "object", + "properties": { + "auth": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.STANAuth" + }, + "clusterId": { + "type": "string" + }, + "maxInflight": { + "type": "integer", + "title": "Max inflight messages when subscribing to the stan server, which means how many messages\nbetween commits, therefore potential duplicates during disruption\n+kubebuilder:default=20" + }, + "name": { + "type": "string", + "title": "+kubebuilder:default=default" + }, + "natsMonitoringUrl": { + "type": "string" + }, + "natsUrl": { + "type": "string" + }, + "subject": { + "type": "string" + }, + "subjectPrefix": { + "type": "string" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.STANAuth": { + "type": "object", + "properties": { + "token": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Scale": { + "type": "object", + "properties": { + "maxReplicas": { + "type": "integer" + }, + "minReplicas": { + "type": "integer" + }, + "replicaRatio": { + "type": "integer", + "title": "takes precedence over min" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Sidecar": { + "type": "object", + 
"properties": { + "resources": { + "title": "+kubebuilder:default={limits: {\"cpu\": \"500m\", \"memory\": \"256Mi\"}, requests: {\"cpu\": \"100m\", \"memory\": \"64Mi\"}}", + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Sink": { "type": "object", "properties": { - "eventSource": { - "$ref": "#/definitions/io.argoproj.events.v1alpha1.EventSource" + "db": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBSink" }, - "namespace": { - "type": "string" + "http": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPSink" + }, + "kafka": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaSink" + }, + "log": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Log" + }, + "name": { + "type": "string", + "title": "+kubebuilder:default=default" + }, + "s3": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3Sink" + }, + "stan": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.STAN" } } }, - "eventsource.EventSourceDeletedResponse": { - "type": "object" - }, - "eventsource.EventSourceWatchEvent": { + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Source": { "type": "object", "properties": { - "object": { - "$ref": "#/definitions/io.argoproj.events.v1alpha1.EventSource" + "cron": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Cron" }, - "type": { - "type": "string" + "db": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBSource" + }, + "http": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPSource" + }, + "kafka": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaSource" + }, + "name": { + "type": "string", + "title": "+kubebuilder:default=default" + }, + "retry": { + "title": "+kubebuilder:default={duration: \"100ms\", steps: 20, factorPercentage: 200, jitterPercentage: 10}", + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Backoff" + }, + "s3": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3Source" + }, + "stan": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.STAN" } } }, - "eventsource.LogEntry": { + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SourceStatus": { "type": "object", - "title": "structured log entry", "properties": { - "eventName": { + "metrics": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Metrics" + } + }, + "pending": { "type": "string", - "title": "optional - the event name (e.g. 
`example`)" + "format": "uint64" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Step": { + "type": "object", + "title": "+kubebuilder:object:root=true\n+kubebuilder:subresource:status\n+kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector\n+kubebuilder:printcolumn:name=\"Phase\",type=string,JSONPath=`.status.phase`\n+kubebuilder:printcolumn:name=\"Reason\",type=string,JSONPath=`.status.reason`\n+kubebuilder:printcolumn:name=\"Message\",type=string,JSONPath=`.status.message`\n+kubebuilder:printcolumn:name=\"Desired\",type=string,JSONPath=`.spec.replicas`\n+kubebuilder:printcolumn:name=\"Current\",type=string,JSONPath=`.status.replicas`", + "properties": { + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, - "eventSourceName": { + "spec": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.StepSpec" + }, + "status": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.StepStatus" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.StepSpec": { + "type": "object", + "properties": { + "affinity": { + "$ref": "#/definitions/io.k8s.api.core.v1.Affinity" + }, + "cat": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Cat" + }, + "code": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Code" + }, + "container": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Container" + }, + "dedupe": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Dedupe" + }, + "expand": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Expand" + }, + "filter": { "type": "string" }, - "eventSourceType": { + "flatten": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Flatten" + }, + "git": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Git" + }, + "group": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Group" + }, + "imagePullSecrets": { + "type": "array", + "title": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images\nin pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets\ncan be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.\nMore info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod\n+patchStrategy=merge\n+patchMergeKey=name", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" + } + }, + "map": { + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Metadata" + }, + "name": { "type": "string", - "title": "optional - the event source type (e.g. 
`webhook`)" + "title": "+kubebuilder:default=default" }, - "level": { + "nodeSelector": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "replicas": { + "type": "integer", + "title": "+kubebuilder:default=1" + }, + "restartPolicy": { + "type": "string", + "title": "+kubebuilder:default=OnFailure" + }, + "scale": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Scale" + }, + "serviceAccountName": { + "type": "string", + "title": "+kubebuilder:default=pipeline" + }, + "sidecar": { + "title": "+kubebuilder:default={resources: {limits: {\"cpu\": \"500m\", \"memory\": \"256Mi\"}, requests: {\"cpu\": \"100m\", \"memory\": \"64Mi\"}}}", + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Sidecar" + }, + "sinks": { + "type": "array", + "title": "+patchStrategy=merge\n+patchMergeKey=name", + "items": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Sink" + } + }, + "sources": { + "type": "array", + "title": "+patchStrategy=merge\n+patchMergeKey=name", + "items": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Source" + } + }, + "terminator": { + "type": "boolean" + }, + "tolerations": { + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Toleration" + } + }, + "volumes": { + "type": "array", + "title": "+patchStrategy=merge\n+patchMergeKey=name", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Volume" + } + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.StepStatus": { + "type": "object", + "properties": { + "lastScaledAt": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { "type": "string" }, - "msg": { + "phase": { "type": "string" }, - "namespace": { + "reason": { "type": "string" }, - "time": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + "replicas": { + "type": "integer" + }, + "selector": { + "type": "string" + }, + "sinkStatuses": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SourceStatus" + } + }, + "sourceStatuses": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SourceStatus" + } } } }, - "eventsource.UpdateEventSourceRequest": { + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Storage": { "type": "object", "properties": { - "eventSource": { - "$ref": "#/definitions/io.argoproj.events.v1alpha1.EventSource" - }, "name": { "type": "string" }, - "namespace": { - "type": "string" + "subPath": { + "type": "string", + "title": "volume name" + } + } + }, + "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.TLS": { + "type": "object", + "properties": { + "caCertSecret": { + "title": "CACertSecret refers to the secret that contains the CA cert", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "certSecret": { + "title": "CertSecret refers to the secret that contains the cert", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "keySecret": { + "title": "KeySecret refers to the secret that contains the key", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" } } }, @@ -12742,6 +14192,37 @@ "description": "Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", "type": "string" }, + "io.k8s.apimachinery.pkg.apis.meta.v1.Condition": { + "description": "// other fields\n}", + "type": "object", + "title": "Condition contains details for one aspect of the current state of this API Resource.\n---\nThis struct is intended for direct use as an array at the field path .status.conditions. 
For example,\ntype FooStatus struct{\n // Represents the observations of a foo's current state.\n // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\"\n // +patchMergeKey=type\n // +patchStrategy=merge\n // +listType=map\n // +listMapKey=type\n Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`", + "properties": { + "lastTransitionTime": { + "title": "lastTransitionTime is the last time the condition transitioned from one status to another.\nThis should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.\n+required\n+kubebuilder:validation:Required\n+kubebuilder:validation:Type=string\n+kubebuilder:validation:Format=date-time", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "type": "string", + "title": "message is a human readable message indicating details about the transition.\nThis may be an empty string.\n+required\n+kubebuilder:validation:Required\n+kubebuilder:validation:MaxLength=32768" + }, + "observedGeneration": { + "type": "string", + "title": "observedGeneration represents the .metadata.generation that the condition was set based upon.\nFor instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date\nwith respect to the current state of the instance.\n+optional\n+kubebuilder:validation:Minimum=0" + }, + "reason": { + "type": "string", + "title": "reason contains a programmatic identifier indicating the reason for the condition's last transition.\nProducers of specific condition types may define expected values and meanings for this field,\nand whether the values are considered a guaranteed API.\nThe value should be a CamelCase string.\nThis field may not be empty.\n+required\n+kubebuilder:validation:Required\n+kubebuilder:validation:MaxLength=1024\n+kubebuilder:validation:MinLength=1\n+kubebuilder:validation:Pattern=`^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$`" + }, + "status": { + "type": "string", + "title": "status of the condition, one of True, False, Unknown.\n+required\n+kubebuilder:validation:Required\n+kubebuilder:validation:Enum=True;False;Unknown" + }, + "type": { + "type": "string", + "title": "type of condition in CamelCase or in foo.example.com/CamelCase.\n---\nMany .condition.type values are consistent across resources like Available, but because arbitrary conditions can be\nuseful (see .node.status.conditions), the ability to deconflict is important.\nThe regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)\n+required\n+kubebuilder:validation:Required\n+kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`\n+kubebuilder:validation:MaxLength=316" + } + } + }, "io.k8s.apimachinery.pkg.apis.meta.v1.CreateOptions": { "description": "CreateOptions may be provided when creating an API object.", "type": "object", @@ -12759,6 +14240,15 @@ } } }, + "io.k8s.apimachinery.pkg.apis.meta.v1.Duration": { + "description": "Duration is a wrapper around time.Duration which supports correct\nmarshaling to YAML and JSON. 
In particular, it marshals into strings, which\ncan be used as map keys in json.", + "type": "object", + "properties": { + "duration": { + "type": "string" + } + } + }, "io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1": { "description": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:\u003cname\u003e', where \u003cname\u003e is the name of a field in a struct, or key in a map 'v:\u003cvalue\u003e', where \u003cvalue\u003e is the exact json formatted value of a list item 'i:\u003cindex\u003e', where \u003cindex\u003e is position of a item in a list 'k:\u003ckeys\u003e', where \u003ckeys\u003e is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", "type": "object" @@ -13066,6 +14556,55 @@ "io.k8s.apimachinery.pkg.util.intstr.IntOrString": { "type": "string" }, + "pipeline.DeletePipelineResponse": { + "type": "object" + }, + "pipeline.LogEntry": { + "type": "object", + "title": "structured log entry", + "properties": { + "msg": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "pipelineName": { + "type": "string" + }, + "stepName": { + "type": "string" + }, + "time": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + } + } + }, + "pipeline.PipelineWatchEvent": { + "type": "object", + "properties": { + "object": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Pipeline" + }, + "type": { + "type": "string" + } + } + }, + "pipeline.RestartPipelineResponse": { + "type": "object" + }, + "pipeline.StepWatchEvent": { + "type": "object", + "properties": { + "object": { + "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Step" + }, + "type": { + "type": "string" + } + } + }, "sensor.CreateSensorRequest": { "type": "object", "properties": { diff --git a/cmd/argo/commands/server.go b/cmd/argo/commands/server.go index 89fbe96ab268..75e08beb0191 100644 --- a/cmd/argo/commands/server.go +++ b/cmd/argo/commands/server.go @@ -17,6 +17,7 @@ import ( "github.com/skratchdot/open-golang/open" "github.com/spf13/cobra" "golang.org/x/net/context" + "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" _ "k8s.io/client-go/plugin/pkg/client/auth" restclient "k8s.io/client-go/rest" @@ -71,10 +72,11 @@ See %s`, help.ArgoSever), namespace := client.Namespace() clients := &types.Clients{ - Workflow: wfclientset.NewForConfigOrDie(config), + Dynamic: dynamic.NewForConfigOrDie(config), EventSource: eventsource.NewForConfigOrDie(config), - Sensor: sensor.NewForConfigOrDie(config), Kubernetes: kubernetes.NewForConfigOrDie(config), + Sensor: sensor.NewForConfigOrDie(config), + Workflow: wfclientset.NewForConfigOrDie(config), } ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/go.mod b/go.mod index f416e5096f5f..5cb95a436a99 100644 --- a/go.mod +++ b/go.mod @@ -6,11 +6,11 @@ require ( cloud.google.com/go v0.55.0 // indirect cloud.google.com/go/storage v1.6.0 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible - github.com/Masterminds/semver v1.5.0 // indirect github.com/Masterminds/sprig v2.22.0+incompatible github.com/TwinProduction/go-color v0.0.3 github.com/aliyun/aliyun-oss-go-sdk 
v2.1.8+incompatible - github.com/antonmedv/expr v1.8.8 + github.com/antonmedv/expr v1.8.9 + github.com/argoproj-labs/argo-dataflow v0.0.88 github.com/argoproj/argo-events v1.4.0 github.com/argoproj/pkg v0.10.1 github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect @@ -26,7 +26,7 @@ require ( github.com/go-openapi/jsonreference v0.19.5 github.com/go-openapi/spec v0.20.2 github.com/go-python/gpython v0.0.3 - github.com/go-sql-driver/mysql v1.5.0 + github.com/go-sql-driver/mysql v1.6.0 github.com/go-swagger/go-swagger v0.25.0 github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.4.3 @@ -36,7 +36,6 @@ require ( github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/imkira/go-interpol v1.1.0 // indirect - github.com/klauspost/compress v1.11.9 // indirect github.com/klauspost/pgzip v1.2.5 github.com/minio/minio-go/v7 v7.0.2 github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b @@ -56,13 +55,12 @@ require ( github.com/valyala/fasttemplate v1.1.0 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonschema v1.2.0 - golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 - golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 + golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 + golang.org/x/net v0.0.0-20210614182718-04defd469f4e golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/text v0.3.6 // indirect golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e - golang.org/x/tools v0.1.3-0.20210608163600-9ed039809d4c + golang.org/x/tools v0.1.5 google.golang.org/api v0.20.0 google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98 google.golang.org/grpc v1.33.1 @@ -72,7 +70,6 @@ require ( gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 // indirect gopkg.in/square/go-jose.v2 v2.5.1 gopkg.in/src-d/go-git.v4 v4.13.1 - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect honnef.co/go/tools v0.2.0 // indirect k8s.io/api v0.20.4 k8s.io/apimachinery v0.20.4 diff --git a/go.sum b/go.sum index cc40c7c40906..41ecb8c226ae 100644 --- a/go.sum +++ b/go.sum @@ -89,8 +89,9 @@ github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= @@ -111,6 +112,7 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama 
v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU= +github.com/Shopify/sarama v1.29.1/go.mod h1:mdtqvCSg8JOxk8PmpTNGyo6wzd4BMm4QXSfDnTXmgkE= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/TwinProduction/go-color v0.0.3 h1:2asEWaZo0Oh/FCib+KqHmEoideK8fMyX58JujC/dbDA= github.com/TwinProduction/go-color v0.0.3/go.mod h1:5hWpSyT+mmKPjCwPNEruBW5Dkbs/2PwOuU468ntEXNQ= @@ -136,14 +138,17 @@ github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antonmedv/expr v1.8.8 h1:uVwIkIBNO2yn4vY2u2DQUqXTmv9jEEMCEcHa19G5weY= github.com/antonmedv/expr v1.8.8/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8= +github.com/antonmedv/expr v1.8.9 h1:O9stiHmHHww9b4ozhPx7T6BK7fXfOCHJ8ybxf0833zw= +github.com/antonmedv/expr v1.8.9/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8= github.com/apache/openwhisk-client-go v0.0.0-20190915054138-716c6f973eb2/go.mod h1:jLLKYP7+1+LFlIJW1n9U1gqeveLM1HIwa4ZHNOFxjPw= github.com/apache/pulsar-client-go v0.1.1/go.mod h1:mlxC65KL1BLhGO2bnT9zWMttVzR2czVPb27D477YpyU= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/ardielle/ardielle-go v1.5.2/go.mod h1:I4hy1n795cUhaVt/ojz83SNVCYIGsAFAONtv2Dr7HUI= github.com/ardielle/ardielle-tools v1.5.4/go.mod h1:oZN+JRMnqGiIhrzkRN9l26Cej9dEx4jeNG6A+AdkShk= +github.com/argoproj-labs/argo-dataflow v0.0.88 h1:Q7LwlK9FbNB00LzZ5/9Qcg3Im1t8/HyHhwVITVSMypY= +github.com/argoproj-labs/argo-dataflow v0.0.88/go.mod h1:HOdmu21+Xx59R28S8+e+wMx+/ApHWNtglPqLvgLRI2E= github.com/argoproj/argo-events v1.4.0 h1:RIzAOomP/4rnv3X6KIDKzXZJ56JKFxSYmksoE98ILWI= github.com/argoproj/argo-events v1.4.0/go.mod h1:wI5A0U3Wj9ZvfPn3ioL18Dz29+7aibtlyU9pS0Ry+bg= github.com/argoproj/pkg v0.9.0/go.mod h1:ra+bQPmbVAoEL+gYSKesuigt4m49i3Qa3mE/xQcjCiA= @@ -169,6 +174,12 @@ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN github.com/aws/aws-sdk-go v1.33.16 h1:h/3BL2BQMEbS67BPoEo/5jD8IPGVrKBmoa4S9mBBntw= github.com/aws/aws-sdk-go v1.33.16/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/aws/aws-sdk-go-v2 v1.7.1/go.mod h1:L5LuPC1ZgDr2xQS7AmIec/Jlc7O/Y1u2KxJyNVab250= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.1/go.mod h1:v33JQ57i2nekYTA70Mb+O18KeH4KqhdqxTJZNK1zdRE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.1/go.mod h1:zceowr5Z1Nh2WVP8bf/3ikB41IZW59E4yIYbg+pC6mw= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.1/go.mod h1:6EQZIwNNvHpq/2/QSJnp4+ECvqIy55w95Ofs0ze+nGQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.11.1/go.mod h1:XLAGFrEjbvMCLvAtWLLP32yTv8GpBquCApZEycDLunI= +github.com/aws/smithy-go v1.6.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= github.com/baiyubin/aliyun-sts-go-sdk 
v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beefsack/go-rate v0.0.0-20180408011153-efa7637bb9b6/go.mod h1:6YNgTHLutezwnBvyneBbwvB8C82y3dcoOj5EQJIdGXA= @@ -184,6 +195,7 @@ github.com/blushft/go-diagrams v0.0.0-20201006005127-c78c821223d9/go.mod h1:nDeX github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bombsimon/logrusr v1.1.0/go.mod h1:Jq0nHtvxabKE5EMwAAdgTaz7dfWE8C4i11NOltxGQpc= github.com/boynton/repl v0.0.0-20170116235056-348863958e3e/go.mod h1:Crc/GCZ3NXDVCio7Yr0o+SSrytpcFhLmVCIzi0s49t4= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= @@ -293,6 +305,7 @@ github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHqu github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -411,8 +424,9 @@ github.com/go-python/gpython v0.0.3/go.mod h1:bmk0l57W/7Cs67MMnz4U28SoYyvz5NTMYy github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-resty/resty/v2 v2.3.0/go.mod h1:UpN9CgLZNsv4e9XG50UU8xdI0F43UQ4HmxLBDwaroHU= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-swagger/go-swagger v0.25.0 h1:FxhyrWWV8V/A9P6GtI5szWordAdbb6Y0nqdY/y9So2w= @@ -488,6 +502,8 @@ github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod 
h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -496,8 +512,10 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github/v31 v31.0.0/go.mod h1:NQPZol8/1sMoWYGN2yaALIBytu17gAWfhbweiEed3pM= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= @@ -536,6 +554,8 @@ github.com/gorilla/handlers v1.4.2 h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YAR github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.0.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -562,9 +582,11 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= @@ -589,12 +611,14 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/raft v1.1.1/go.mod 
h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hokaccha/go-prettyjson v0.0.0-20190818114111-108c894c2c0e/go.mod h1:pFlLw2CfqZiIBOx6BuCeRLCrfxBJipTY0nIOF/VbGcI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.3.1 h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs= github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= +github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/iancoleman/strcase v0.1.1/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -613,15 +637,23 @@ github.com/jawher/mow.cli v1.0.4/go.mod h1:5hQj2V8g+qYmLUVWqu4Wuja1pI57M83EChYLV github.com/jawher/mow.cli v1.1.0/go.mod h1:aNaQlc7ozF3vw6IJ2dHjp2ZFiA4ozMIYY6PyuRJwlUg= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= github.com/jcmturner/gofork v0.0.0-20180107083740-2aebee971930/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= 
github.com/joncalhoun/qson v0.0.0-20200422171543-84433dcd3da0/go.mod h1:DFXrEwSRX0p/aSvxE21319menCBFeQO0jXpRj7LEZUA= @@ -640,6 +672,7 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/fslock v0.0.0-20160525022230-4d5c94c67b4b/go.mod h1:HMcgvsgd0Fjj4XXDkbjdmlbI505rUPBs6WBMYg2pXks= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= @@ -658,8 +691,9 @@ github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.10.8/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.8/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.9 h1:5OCMOdde1TCT2sookEuVeEZzA8bmRSFV3AwPDZAG8AA= -github.com/klauspost/compress v1.11.9/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.1 h1:wXr2uRxZTJXHLly6qhJabee5JqIhTRoLBhDOA74hDEQ= +github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.3 h1:CCtW0xUnWGVINKvE/WWOYKdsPV6mawAtvQuSl8guwQs= github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= @@ -683,8 +717,9 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/labstack/echo v3.2.1+incompatible/go.mod h1:0INS7j/VjnFxD4E2wkz67b8cVwCLbBmJyDaka6Cmk1s= github.com/labstack/gommon v0.2.7/go.mod h1:/tj9csK2iPSBvn+3NLM9e52usepMtrd5ilFYA+wQNJ4= -github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.9.0 h1:L8nSXQQzAYByakOFMTwpjRoHsMJklur4Gi59b6VivR8= +github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lightstep/tracecontext.go v0.0.0-20181129014701-1757c391b1ac/go.mod h1:Frd2bnT3w5FB5q49ENTfVlztJES+1k/7lyWX2+9gq/M= @@ -713,6 +748,7 @@ github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= 
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= @@ -733,8 +769,9 @@ github.com/minio/minio-go/v7 v7.0.2/go.mod h1:dJ80Mv2HeGkYLH1sqS/ksz07ON6csH3S6J github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.1.1 h1:Bp6x9R1Wn16SIz3OfeDr0b7RnCG2OB66Y7PQyC/cvq4= +github.com/mitchellh/copystructure v1.1.1/go.mod h1:EBArHfARyrSWO/+Wyr9zwEkc6XMFB9XyNgFNmRkZZU4= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -771,17 +808,23 @@ github.com/nats-io/go-nats v1.7.2/go.mod h1:+t7RHT5ApZebkrQdnn6AhQJmhJJiKAvJUio1 github.com/nats-io/graft v0.0.0-20200605173148-348798afea05/go.mod h1:idnzXeCwCx69FMg+R0DyD4/OhrF1A+v3BqF5xSz+tS4= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/jwt v1.1.0/go.mod h1:n3cvmLfBfnpV4JJRN7lRYCyZnw48ksGsbThGXEk4w9M= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= github.com/nats-io/nats-server/v2 v2.1.4/go.mod h1:Jw1Z28soD/QasIA2uWjXyM9El1jly3YwyFOuR8tH1rg= github.com/nats-io/nats-server/v2 v2.1.7/go.mod h1:rbRrRE/Iv93O/rUvZ9dh4NfT0Cm9HWjW/BqOWLGgYiE= +github.com/nats-io/nats-server/v2 v2.1.9/go.mod h1:9qVyoewoYXzG1ME9ox0HwkkzyYvnlBDugfR4Gg/8uHU= github.com/nats-io/nats-streaming-server v0.17.0/go.mod h1:ewPBEsmp62Znl3dcRsYtlcfwudxHEdYMtYqUQSt4fE0= +github.com/nats-io/nats-streaming-server v0.21.1/go.mod h1:2W8QfNVOtcFpmf0bRiwuLtRb0/hkX4NuOxPOFNOThVQ= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= github.com/nats-io/nats.go v1.10.0/go.mod h1:AjGArbfyR50+afOUotNX2Xs5SYHf+CoOa5HH1eEl2HE= +github.com/nats-io/nats.go v1.11.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.4/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= +github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nats-io/stan.go v0.6.0/go.mod h1:eIcD5bi3pqbHT/xIIvXMwvzXYElgouBvaVRftaE+eac= +github.com/nats-io/stan.go v0.8.3/go.mod h1:Ejm8bbHnMTSptU6uNMAVuxeapMJYBB/Ml3ej6z4GoSY= github.com/nicksnyder/go-i18n v1.10.1-0.20190510212457-b280125b035a/go.mod h1:e4Di5xjP9oTVrC6y3C7C0HoSYXjSbhh/dU0eUV32nB4= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d/go.mod 
h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ= @@ -823,6 +866,7 @@ github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= github.com/pborman/getopt v0.0.0-20180729010549-6fdd0a2c7117/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= @@ -837,6 +881,8 @@ github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0 github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.5.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -882,17 +928,20 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7z github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/radovskyb/watcher v1.0.7/go.mod h1:78okwvY5wPdzcb1UYnip1pvrZNIVEIh/Cm+ZuvsUYIg= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rivo/tview v0.0.0-20200219210816-cd38d7432498/go.mod h1:6lkG1x+13OShEf0EaOCaTQYyB7d5nSbb181KtjlS+84= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= +github.com/robfig/cron/v3 v3.0.0/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= 
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -1021,8 +1070,10 @@ github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0B github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -1109,9 +1160,13 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1194,8 +1249,9 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226101413-39120d07d75e/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 
h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1213,6 +1269,7 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1252,6 +1309,7 @@ golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1262,6 +1320,7 @@ golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1277,12 +1336,15 @@ golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1318,6 +1380,7 @@ golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -1362,8 +1425,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200717024301-6ddee64345a6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.3-0.20210608163600-9ed039809d4c h1:Pv9gNyJFYVdpUAVZYJ1BDSU4eGgXQ+0f3DIGAdolO5s= -golang.org/x/tools v0.1.3-0.20210608163600-9ed039809d4c/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1591,6 +1654,7 @@ rsc.io/quote/v3 
v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0=
+sigs.k8s.io/controller-runtime v0.7.0 h1:bU20IBBEPccWz5+zXpLnpVsgBYxqclaHu1pVDl/gEt8=
sigs.k8s.io/controller-runtime v0.7.0/go.mod h1:pJ3YBrJiAqMAZKi6UVGuE98ZrroV1p+pIhoHsMm9wdU=
sigs.k8s.io/controller-tools v0.4.1 h1:VkuV0MxlRPmRu5iTgBZU4UxUX2LiR99n3sdQGRxZF4w=
sigs.k8s.io/controller-tools v0.4.1/go.mod h1:G9rHdZMVlBDocIxGkK3jHLWqcTMNvveypYJwrvYKjWU=
diff --git a/pkg/apiclient/argo-kube-client.go b/pkg/apiclient/argo-kube-client.go
index 911bc670dd9f..35b39cdca270 100644
--- a/pkg/apiclient/argo-kube-client.go
+++ b/pkg/apiclient/argo-kube-client.go
@@ -6,6 +6,7 @@ import (
 	eventsource "github.com/argoproj/argo-events/pkg/client/eventsource/clientset/versioned"
 	sensor "github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned"
+	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/clientcmd"
@@ -43,6 +44,10 @@ func newArgoKubeClient(clientConfig clientcmd.ClientConfig, instanceIDService in
 	if err != nil {
 		return nil, nil, err
 	}
+	dynamicClient, err := dynamic.NewForConfig(restConfig)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failure to create dynamic client: %w", err)
+	}
 	wfClient, err := workflow.NewForConfig(restConfig)
 	if err != nil {
 		return nil, nil, err
@@ -59,7 +64,13 @@ func newArgoKubeClient(clientConfig clientcmd.ClientConfig, instanceIDService in
 	if err != nil {
 		return nil, nil, err
 	}
-	clients := &types.Clients{Workflow: wfClient, EventSource: eventSourceInterface, Sensor: sensorInterface, Kubernetes: kubeClient}
+	clients := &types.Clients{
+		Dynamic:     dynamicClient,
+		EventSource: eventSourceInterface,
+		Kubernetes:  kubeClient,
+		Sensor:      sensorInterface,
+		Workflow:    wfClient,
+	}
 	gatekeeper, err := auth.NewGatekeeper(auth.Modes{auth.Server: true}, clients, restConfig, nil, auth.DefaultClientForAuthorization, "unused")
 	if err != nil {
 		return nil, nil, err
diff --git a/pkg/apiclient/pipeline/forwarder_overwrite.go b/pkg/apiclient/pipeline/forwarder_overwrite.go
new file mode 100644
index 000000000000..9dd891875d68
--- /dev/null
+++ b/pkg/apiclient/pipeline/forwarder_overwrite.go
@@ -0,0 +1,11 @@
+package pipeline
+
+import (
+	"github.com/argoproj/pkg/grpc/http"
+)
+
+func init() {
+	forward_PipelineService_WatchPipelines_0 = http.StreamForwarder
+	forward_PipelineService_PipelineLogs_0 = http.StreamForwarder
+	forward_PipelineService_WatchSteps_0 = http.StreamForwarder
+}
diff --git a/pkg/apiclient/pipeline/pipeline.pb.go b/pkg/apiclient/pipeline/pipeline.pb.go
new file mode 100644
index 000000000000..e1763843a883
--- /dev/null
+++ b/pkg/apiclient/pipeline/pipeline.pb.go
@@ -0,0 +1,3408 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
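The forwarder_overwrite.go file above reroutes the three server-streaming RPCs (WatchPipelines, PipelineLogs and WatchSteps) through http.StreamForwarder, so the HTTP gateway relays events to the caller as they arrive instead of buffering a single response. A minimal sketch of consuming one of those streams with the generated client defined in the file below — the localhost:2746 endpoint, the plain-text dial option, the "argo" namespace and the v3 module path are assumptions for illustration, not part of this patch:

	package main

	import (
		"context"
		"fmt"
		"io"
		"log"

		"github.com/argoproj/argo-workflows/v3/pkg/apiclient/pipeline" // assumed module path
		"google.golang.org/grpc"
	)

	func main() {
		// Assumes an Argo server accepting plain-text gRPC on its default port.
		conn, err := grpc.Dial("localhost:2746", grpc.WithInsecure())
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()

		client := pipeline.NewPipelineServiceClient(conn)
		// WatchPipelines is one of the three RPCs registered with StreamForwarder above.
		stream, err := client.WatchPipelines(context.Background(),
			&pipeline.ListPipelinesRequest{Namespace: "argo"}) // hypothetical namespace
		if err != nil {
			log.Fatal(err)
		}
		for {
			event, err := stream.Recv() // blocks until the next PipelineWatchEvent
			if err == io.EOF {
				return
			}
			if err != nil {
				log.Fatal(err)
			}
			fmt.Println(event.Type, event.Object.Name) // Object is a *v1alpha1.Pipeline
		}
	}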
+// source: pkg/apiclient/pipeline/pipeline.proto + +package pipeline + +import ( + context "context" + fmt "fmt" + v1alpha1 "github.com/argoproj-labs/argo-dataflow/api/v1alpha1" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ListPipelinesRequest struct { + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + ListOptions *v1.ListOptions `protobuf:"bytes,2,opt,name=listOptions,proto3" json:"listOptions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListPipelinesRequest) Reset() { *m = ListPipelinesRequest{} } +func (m *ListPipelinesRequest) String() string { return proto.CompactTextString(m) } +func (*ListPipelinesRequest) ProtoMessage() {} +func (*ListPipelinesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_71dfb1a81115c785, []int{0} +} +func (m *ListPipelinesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListPipelinesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListPipelinesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListPipelinesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListPipelinesRequest.Merge(m, src) +} +func (m *ListPipelinesRequest) XXX_Size() int { + return m.Size() +} +func (m *ListPipelinesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListPipelinesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListPipelinesRequest proto.InternalMessageInfo + +func (m *ListPipelinesRequest) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *ListPipelinesRequest) GetListOptions() *v1.ListOptions { + if m != nil { + return m.ListOptions + } + return nil +} + +type PipelineWatchEvent struct { + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Object *v1alpha1.Pipeline `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PipelineWatchEvent) Reset() { *m = PipelineWatchEvent{} } +func (m *PipelineWatchEvent) String() string { return proto.CompactTextString(m) } +func (*PipelineWatchEvent) ProtoMessage() {} +func (*PipelineWatchEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_71dfb1a81115c785, []int{1} +} +func (m *PipelineWatchEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PipelineWatchEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_PipelineWatchEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PipelineWatchEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_PipelineWatchEvent.Merge(m, src) +} +func (m *PipelineWatchEvent) XXX_Size() int { + return m.Size() +} +func (m *PipelineWatchEvent) XXX_DiscardUnknown() { + xxx_messageInfo_PipelineWatchEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_PipelineWatchEvent proto.InternalMessageInfo + +func (m *PipelineWatchEvent) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *PipelineWatchEvent) GetObject() *v1alpha1.Pipeline { + if m != nil { + return m.Object + } + return nil +} + +type GetPipelineRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + GetOptions *v1.GetOptions `protobuf:"bytes,3,opt,name=getOptions,proto3" json:"getOptions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPipelineRequest) Reset() { *m = GetPipelineRequest{} } +func (m *GetPipelineRequest) String() string { return proto.CompactTextString(m) } +func (*GetPipelineRequest) ProtoMessage() {} +func (*GetPipelineRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_71dfb1a81115c785, []int{2} +} +func (m *GetPipelineRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetPipelineRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetPipelineRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetPipelineRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPipelineRequest.Merge(m, src) +} +func (m *GetPipelineRequest) XXX_Size() int { + return m.Size() +} +func (m *GetPipelineRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetPipelineRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPipelineRequest proto.InternalMessageInfo + +func (m *GetPipelineRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetPipelineRequest) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *GetPipelineRequest) GetGetOptions() *v1.GetOptions { + if m != nil { + return m.GetOptions + } + return nil +} + +type RestartPipelineRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RestartPipelineRequest) Reset() { *m = RestartPipelineRequest{} } +func (m *RestartPipelineRequest) String() string { return proto.CompactTextString(m) } +func (*RestartPipelineRequest) ProtoMessage() {} +func (*RestartPipelineRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_71dfb1a81115c785, []int{3} +} +func (m *RestartPipelineRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RestartPipelineRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RestartPipelineRequest.Marshal(b, m, 
deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RestartPipelineRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RestartPipelineRequest.Merge(m, src) +} +func (m *RestartPipelineRequest) XXX_Size() int { + return m.Size() +} +func (m *RestartPipelineRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RestartPipelineRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RestartPipelineRequest proto.InternalMessageInfo + +func (m *RestartPipelineRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *RestartPipelineRequest) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +type RestartPipelineResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RestartPipelineResponse) Reset() { *m = RestartPipelineResponse{} } +func (m *RestartPipelineResponse) String() string { return proto.CompactTextString(m) } +func (*RestartPipelineResponse) ProtoMessage() {} +func (*RestartPipelineResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_71dfb1a81115c785, []int{4} +} +func (m *RestartPipelineResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RestartPipelineResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RestartPipelineResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RestartPipelineResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RestartPipelineResponse.Merge(m, src) +} +func (m *RestartPipelineResponse) XXX_Size() int { + return m.Size() +} +func (m *RestartPipelineResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RestartPipelineResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RestartPipelineResponse proto.InternalMessageInfo + +type DeletePipelineRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + DeleteOptions *v1.DeleteOptions `protobuf:"bytes,3,opt,name=deleteOptions,proto3" json:"deleteOptions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeletePipelineRequest) Reset() { *m = DeletePipelineRequest{} } +func (m *DeletePipelineRequest) String() string { return proto.CompactTextString(m) } +func (*DeletePipelineRequest) ProtoMessage() {} +func (*DeletePipelineRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_71dfb1a81115c785, []int{5} +} +func (m *DeletePipelineRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeletePipelineRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeletePipelineRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DeletePipelineRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeletePipelineRequest.Merge(m, src) +} +func (m *DeletePipelineRequest) XXX_Size() int { + return m.Size() +} +func (m *DeletePipelineRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeletePipelineRequest.DiscardUnknown(m) 
+} + +var xxx_messageInfo_DeletePipelineRequest proto.InternalMessageInfo + +func (m *DeletePipelineRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DeletePipelineRequest) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *DeletePipelineRequest) GetDeleteOptions() *v1.DeleteOptions { + if m != nil { + return m.DeleteOptions + } + return nil +} + +type DeletePipelineResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeletePipelineResponse) Reset() { *m = DeletePipelineResponse{} } +func (m *DeletePipelineResponse) String() string { return proto.CompactTextString(m) } +func (*DeletePipelineResponse) ProtoMessage() {} +func (*DeletePipelineResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_71dfb1a81115c785, []int{6} +} +func (m *DeletePipelineResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeletePipelineResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeletePipelineResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DeletePipelineResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeletePipelineResponse.Merge(m, src) +} +func (m *DeletePipelineResponse) XXX_Size() int { + return m.Size() +} +func (m *DeletePipelineResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeletePipelineResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeletePipelineResponse proto.InternalMessageInfo + +type WatchStepRequest struct { + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + ListOptions *v1.ListOptions `protobuf:"bytes,2,opt,name=listOptions,proto3" json:"listOptions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WatchStepRequest) Reset() { *m = WatchStepRequest{} } +func (m *WatchStepRequest) String() string { return proto.CompactTextString(m) } +func (*WatchStepRequest) ProtoMessage() {} +func (*WatchStepRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_71dfb1a81115c785, []int{7} +} +func (m *WatchStepRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WatchStepRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WatchStepRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WatchStepRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WatchStepRequest.Merge(m, src) +} +func (m *WatchStepRequest) XXX_Size() int { + return m.Size() +} +func (m *WatchStepRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WatchStepRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WatchStepRequest proto.InternalMessageInfo + +func (m *WatchStepRequest) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *WatchStepRequest) GetListOptions() *v1.ListOptions { + if m != nil { + return m.ListOptions + } + return nil +} + +type StepWatchEvent struct { + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Object *v1alpha1.Step `protobuf:"bytes,2,opt,name=object,proto3" 
json:"object,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *StepWatchEvent) Reset() { *m = StepWatchEvent{} }
+func (m *StepWatchEvent) String() string { return proto.CompactTextString(m) }
+func (*StepWatchEvent) ProtoMessage() {}
+func (*StepWatchEvent) Descriptor() ([]byte, []int) {
+	return fileDescriptor_71dfb1a81115c785, []int{8}
+}
+func (m *StepWatchEvent) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StepWatchEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_StepWatchEvent.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *StepWatchEvent) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StepWatchEvent.Merge(m, src)
+}
+func (m *StepWatchEvent) XXX_Size() int {
+	return m.Size()
+}
+func (m *StepWatchEvent) XXX_DiscardUnknown() {
+	xxx_messageInfo_StepWatchEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StepWatchEvent proto.InternalMessageInfo
+
+func (m *StepWatchEvent) GetType() string {
+	if m != nil {
+		return m.Type
+	}
+	return ""
+}
+
+func (m *StepWatchEvent) GetObject() *v1alpha1.Step {
+	if m != nil {
+		return m.Object
+	}
+	return nil
+}
+
+type PipelineLogsRequest struct {
+	Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
+	// optional - only return entries for this pipeline
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	// optional - only return entries for this step
+	StepName string `protobuf:"bytes,3,opt,name=stepName,proto3" json:"stepName,omitempty"`
+	// optional - only return entries which match this expression
+	Grep string `protobuf:"bytes,4,opt,name=grep,proto3" json:"grep,omitempty"`
+	PodLogOptions *v11.PodLogOptions `protobuf:"bytes,5,opt,name=podLogOptions,proto3" json:"podLogOptions,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *PipelineLogsRequest) Reset() { *m = PipelineLogsRequest{} }
+func (m *PipelineLogsRequest) String() string { return proto.CompactTextString(m) }
+func (*PipelineLogsRequest) ProtoMessage() {}
+func (*PipelineLogsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_71dfb1a81115c785, []int{9}
+}
+func (m *PipelineLogsRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PipelineLogsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_PipelineLogsRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *PipelineLogsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PipelineLogsRequest.Merge(m, src)
+}
+func (m *PipelineLogsRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *PipelineLogsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_PipelineLogsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PipelineLogsRequest proto.InternalMessageInfo
+
+func (m *PipelineLogsRequest) GetNamespace() string {
+	if m != nil {
+		return m.Namespace
+	}
+	return ""
+}
+
+func (m *PipelineLogsRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *PipelineLogsRequest) GetStepName() string {
+	if m != nil {
+		return
m.StepName + } + return "" +} + +func (m *PipelineLogsRequest) GetGrep() string { + if m != nil { + return m.Grep + } + return "" +} + +func (m *PipelineLogsRequest) GetPodLogOptions() *v11.PodLogOptions { + if m != nil { + return m.PodLogOptions + } + return nil +} + +// structured log entry +type LogEntry struct { + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + PipelineName string `protobuf:"bytes,2,opt,name=pipelineName,proto3" json:"pipelineName,omitempty"` + StepName string `protobuf:"bytes,3,opt,name=stepName,proto3" json:"stepName,omitempty"` + Time *v1.Time `protobuf:"bytes,6,opt,name=time,proto3" json:"time,omitempty"` + Msg string `protobuf:"bytes,7,opt,name=msg,proto3" json:"msg,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogEntry) Reset() { *m = LogEntry{} } +func (m *LogEntry) String() string { return proto.CompactTextString(m) } +func (*LogEntry) ProtoMessage() {} +func (*LogEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_71dfb1a81115c785, []int{10} +} +func (m *LogEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LogEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LogEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogEntry.Merge(m, src) +} +func (m *LogEntry) XXX_Size() int { + return m.Size() +} +func (m *LogEntry) XXX_DiscardUnknown() { + xxx_messageInfo_LogEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_LogEntry proto.InternalMessageInfo + +func (m *LogEntry) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *LogEntry) GetPipelineName() string { + if m != nil { + return m.PipelineName + } + return "" +} + +func (m *LogEntry) GetStepName() string { + if m != nil { + return m.StepName + } + return "" +} + +func (m *LogEntry) GetTime() *v1.Time { + if m != nil { + return m.Time + } + return nil +} + +func (m *LogEntry) GetMsg() string { + if m != nil { + return m.Msg + } + return "" +} + +func init() { + proto.RegisterType((*ListPipelinesRequest)(nil), "pipeline.ListPipelinesRequest") + proto.RegisterType((*PipelineWatchEvent)(nil), "pipeline.PipelineWatchEvent") + proto.RegisterType((*GetPipelineRequest)(nil), "pipeline.GetPipelineRequest") + proto.RegisterType((*RestartPipelineRequest)(nil), "pipeline.RestartPipelineRequest") + proto.RegisterType((*RestartPipelineResponse)(nil), "pipeline.RestartPipelineResponse") + proto.RegisterType((*DeletePipelineRequest)(nil), "pipeline.DeletePipelineRequest") + proto.RegisterType((*DeletePipelineResponse)(nil), "pipeline.DeletePipelineResponse") + proto.RegisterType((*WatchStepRequest)(nil), "pipeline.WatchStepRequest") + proto.RegisterType((*StepWatchEvent)(nil), "pipeline.StepWatchEvent") + proto.RegisterType((*PipelineLogsRequest)(nil), "pipeline.PipelineLogsRequest") + proto.RegisterType((*LogEntry)(nil), "pipeline.LogEntry") +} + +func init() { + proto.RegisterFile("pkg/apiclient/pipeline/pipeline.proto", fileDescriptor_71dfb1a81115c785) +} + +var fileDescriptor_71dfb1a81115c785 = []byte{ + // 864 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x56, 0xcf, 0x6f, 0xdc, 0x44, + 0x14, 0xd6, 0x24, 0x21, 
0x4d, 0x5f, 0x9a, 0xb4, 0x1a, 0xa0, 0x2c, 0x56, 0x9a, 0x6e, 0x47, 0x14, + 0x85, 0xd2, 0x8e, 0xb3, 0x6d, 0x0f, 0xf4, 0x00, 0x12, 0xd0, 0x2a, 0x12, 0x5a, 0x95, 0xe0, 0x80, + 0x10, 0x5c, 0xaa, 0x89, 0x33, 0x78, 0xdd, 0xb5, 0x3d, 0x83, 0x67, 0xba, 0x51, 0x84, 0x0a, 0x02, + 0x89, 0x03, 0x12, 0x37, 0x2e, 0xe5, 0xce, 0x5f, 0xc1, 0x09, 0x89, 0x0b, 0x47, 0x24, 0xfe, 0x01, + 0x14, 0xf1, 0x87, 0xa0, 0x99, 0xf5, 0xf8, 0xc7, 0xae, 0x69, 0x2d, 0xf6, 0xd0, 0xdb, 0xcc, 0xf8, + 0xbd, 0xef, 0x7d, 0xef, 0xf3, 0xf3, 0xe7, 0x81, 0xab, 0x72, 0x1c, 0xf9, 0x4c, 0xc6, 0x61, 0x12, + 0xf3, 0x4c, 0xfb, 0x32, 0x96, 0x3c, 0x89, 0x33, 0x5e, 0x2e, 0xa8, 0xcc, 0x85, 0x16, 0x78, 0xcd, + 0xed, 0xbd, 0xad, 0x48, 0x88, 0x28, 0xe1, 0x26, 0xc7, 0x67, 0x59, 0x26, 0x34, 0xd3, 0xb1, 0xc8, + 0xd4, 0x34, 0xce, 0xbb, 0x3d, 0x7e, 0x4b, 0xd1, 0x58, 0x98, 0xa7, 0x29, 0x0b, 0x47, 0x71, 0xc6, + 0xf3, 0x13, 0xbf, 0x28, 0xa1, 0xfc, 0x94, 0x6b, 0xe6, 0x4f, 0x06, 0x7e, 0xc4, 0x33, 0x9e, 0x33, + 0xcd, 0x8f, 0x8a, 0x2c, 0x52, 0x65, 0xf9, 0xa1, 0xc8, 0x79, 0x5b, 0xcc, 0xfb, 0x51, 0xac, 0x47, + 0x8f, 0x0e, 0x69, 0x28, 0x52, 0x9f, 0xe5, 0x91, 0x90, 0xb9, 0x78, 0x78, 0x23, 0x61, 0x87, 0xca, + 0xee, 0x6e, 0x1c, 0x31, 0xcd, 0xbe, 0x48, 0xc4, 0xb1, 0xc5, 0x98, 0x0c, 0x58, 0x22, 0x47, 0x6c, + 0x0e, 0x84, 0xfc, 0x80, 0xe0, 0xa5, 0x61, 0xac, 0xf4, 0x7e, 0xd1, 0x8d, 0x0a, 0xf8, 0x97, 0x8f, + 0xb8, 0xd2, 0x78, 0x0b, 0xce, 0x66, 0x2c, 0xe5, 0x4a, 0xb2, 0x90, 0xf7, 0x50, 0x1f, 0xed, 0x9c, + 0x0d, 0xaa, 0x03, 0x7c, 0x00, 0xeb, 0x49, 0xac, 0xf4, 0x87, 0xd2, 0xb6, 0xda, 0x5b, 0xea, 0xa3, + 0x9d, 0xf5, 0x9b, 0x03, 0x3a, 0x65, 0x4d, 0xeb, 0xbd, 0x52, 0x39, 0x8e, 0xcc, 0x81, 0xa2, 0xa6, + 0x57, 0x3a, 0x19, 0xd0, 0x61, 0x95, 0x18, 0xd4, 0x51, 0xc8, 0x37, 0x80, 0x1d, 0x8d, 0x4f, 0x99, + 0x0e, 0x47, 0xf7, 0x26, 0x3c, 0xd3, 0x18, 0xc3, 0x8a, 0x3e, 0x91, 0x8e, 0x83, 0x5d, 0xe3, 0x4f, + 0x60, 0x55, 0x1c, 0x3e, 0xe4, 0xa1, 0x2e, 0x2a, 0xbf, 0x4d, 0x2b, 0x2d, 0xa8, 0xd3, 0xe2, 0x81, + 0xd1, 0xc2, 0xee, 0x1e, 0x38, 0x2d, 0x0c, 0x11, 0xea, 0xb4, 0xa0, 0xae, 0x58, 0x50, 0x80, 0x91, + 0x27, 0x08, 0xf0, 0x1e, 0x2f, 0xb5, 0x70, 0x52, 0x60, 0x58, 0x31, 0x9d, 0x3b, 0x06, 0x66, 0xdd, + 0x94, 0x67, 0x69, 0x56, 0x9e, 0x7d, 0x80, 0x88, 0x97, 0xea, 0x2c, 0x5b, 0x8e, 0xbb, 0xdd, 0xd4, + 0xd9, 0x2b, 0xf3, 0x82, 0x1a, 0x06, 0xf9, 0x00, 0x2e, 0x06, 0x5c, 0x69, 0x96, 0x2f, 0xce, 0x8e, + 0xbc, 0x0a, 0xaf, 0xcc, 0x61, 0x29, 0x29, 0x32, 0xc5, 0xc9, 0x2f, 0x08, 0x5e, 0xbe, 0xcb, 0x13, + 0xae, 0xf9, 0xe2, 0x22, 0x7c, 0x06, 0x1b, 0x47, 0x16, 0xaa, 0xa9, 0xc3, 0xad, 0x6e, 0x3a, 0xdc, + 0xad, 0xa7, 0x06, 0x4d, 0x24, 0xd2, 0x83, 0x8b, 0xb3, 0x2c, 0x8b, 0x06, 0xbe, 0x47, 0x70, 0xc1, + 0x0e, 0xcf, 0x81, 0xe6, 0xf2, 0x39, 0xce, 0xf2, 0x31, 0x6c, 0x1a, 0x06, 0xcf, 0x98, 0xe3, 0x8f, + 0x66, 0xe6, 0xf8, 0xce, 0xff, 0x9a, 0x63, 0xdb, 0xaa, 0x9b, 0xe1, 0xdf, 0x10, 0xbc, 0xe8, 0x54, + 0x19, 0x8a, 0xa8, 0xe3, 0xf7, 0xec, 0xde, 0xee, 0x52, 0xed, 0xed, 0x7a, 0xb0, 0xa6, 0x34, 0x97, + 0xf7, 0xcd, 0xf9, 0xb2, 0x3d, 0x2f, 0xf7, 0x26, 0x3e, 0xca, 0xb9, 0xec, 0xad, 0x4c, 0xe3, 0xcd, + 0x1a, 0xef, 0xc1, 0x86, 0x14, 0x47, 0x43, 0x11, 0x39, 0x25, 0x5f, 0xb0, 0x3d, 0x5d, 0xa9, 0x29, + 0x49, 0x8d, 0x97, 0x19, 0xdd, 0xf6, 0xeb, 0x81, 0x41, 0x33, 0x8f, 0xfc, 0x8a, 0x60, 0x6d, 0x28, + 0xa2, 0x7b, 0x99, 0xce, 0x4f, 0x9e, 0xc1, 0x9b, 0xc0, 0x39, 0xe7, 0xc3, 0xf7, 0x2b, 0xfe, 0x8d, + 0xb3, 0xa7, 0xf6, 0xf1, 0x0e, 0xac, 0xe8, 0x38, 0xe5, 0xbd, 0x55, 0x4b, 0xf5, 0x5a, 0xb7, 0x97, + 0xfe, 0x71, 0x9c, 0xf2, 0xc0, 0xe6, 0xe1, 0x0b, 0xb0, 0x9c, 0xaa, 0xa8, 0x77, 0xc6, 0xc2, 0x9a, + 0xe5, 0xcd, 0xdf, 0xcf, 0xc0, 0x79, 0xa7, 0xff, 
0x01, 0xcf, 0x27, 0x71, 0xc8, 0xf1, 0xcf, 0x08, + 0x36, 0x1a, 0x26, 0x8b, 0xb7, 0x69, 0xf9, 0x3b, 0x69, 0x73, 0x5f, 0xef, 0xdd, 0x85, 0x0c, 0xcd, + 0x40, 0x92, 0xab, 0xdf, 0xfd, 0xf5, 0xcf, 0x4f, 0x4b, 0x97, 0xf1, 0xa5, 0xe2, 0x07, 0x50, 0xfe, + 0xc0, 0x94, 0xff, 0x55, 0x29, 0xe0, 0x63, 0xfc, 0x35, 0x6c, 0xda, 0x21, 0xed, 0xce, 0x6d, 0xab, + 0x7a, 0x3e, 0x6f, 0xd7, 0xe4, 0xba, 0x2d, 0xfb, 0x3a, 0x7e, 0xcd, 0x95, 0x55, 0x3a, 0xe7, 0x2c, + 0x6d, 0xaf, 0xbe, 0x8b, 0xf0, 0x13, 0x04, 0xeb, 0x35, 0xcf, 0xc5, 0x35, 0xf4, 0x79, 0x2b, 0xf6, + 0x16, 0x33, 0xfa, 0x79, 0x72, 0xad, 0xac, 0xa6, 0xeb, 0xc7, 0xf8, 0x47, 0x04, 0xe7, 0x67, 0x8c, + 0x12, 0xf7, 0x2b, 0x7a, 0xed, 0x7e, 0xec, 0x5d, 0x79, 0x4a, 0x44, 0x61, 0x52, 0xb7, 0x2d, 0x0d, + 0x4a, 0xae, 0x77, 0xa1, 0xe1, 0xe7, 0x53, 0x14, 0xfc, 0x2d, 0x82, 0xcd, 0xa6, 0xeb, 0xe1, 0xcb, + 0x55, 0xad, 0x56, 0xd7, 0xf6, 0xfa, 0xff, 0x1d, 0x50, 0x70, 0x29, 0x24, 0xb9, 0xd6, 0x4d, 0x92, + 0x09, 0x9c, 0xab, 0x9b, 0x0b, 0xbe, 0x34, 0x3f, 0x0b, 0x35, 0xd3, 0xf1, 0x70, 0x6d, 0x94, 0x8a, + 0x0f, 0x9a, 0x0c, 0x6c, 0xc1, 0x37, 0xf1, 0x1b, 0x5d, 0x06, 0xc4, 0x4f, 0x44, 0xa4, 0x76, 0x11, + 0xce, 0x00, 0x4a, 0x57, 0x57, 0xd8, 0xab, 0x60, 0x67, 0xbd, 0xde, 0xeb, 0x55, 0xcf, 0x9a, 0x06, + 0x4c, 0x76, 0x6c, 0x61, 0x82, 0xfb, 0x33, 0x85, 0x8d, 0x19, 0xcc, 0x4c, 0xe5, 0x7b, 0x7b, 0x7f, + 0x9c, 0x6e, 0xa3, 0x3f, 0x4f, 0xb7, 0xd1, 0xdf, 0xa7, 0xdb, 0xe8, 0xf3, 0x3b, 0x2d, 0x37, 0xad, + 0xe9, 0x25, 0xeb, 0x58, 0xe4, 0x63, 0x33, 0x6f, 0xca, 0x6f, 0xbf, 0x35, 0x1e, 0xae, 0xda, 0x6b, + 0xd6, 0xad, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xb8, 0xa0, 0x67, 0xda, 0x56, 0x0a, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// PipelineServiceClient is the client API for PipelineService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type PipelineServiceClient interface { + ListPipelines(ctx context.Context, in *ListPipelinesRequest, opts ...grpc.CallOption) (*v1alpha1.PipelineList, error) + WatchPipelines(ctx context.Context, in *ListPipelinesRequest, opts ...grpc.CallOption) (PipelineService_WatchPipelinesClient, error) + GetPipeline(ctx context.Context, in *GetPipelineRequest, opts ...grpc.CallOption) (*v1alpha1.Pipeline, error) + RestartPipeline(ctx context.Context, in *RestartPipelineRequest, opts ...grpc.CallOption) (*RestartPipelineResponse, error) + DeletePipeline(ctx context.Context, in *DeletePipelineRequest, opts ...grpc.CallOption) (*DeletePipelineResponse, error) + PipelineLogs(ctx context.Context, in *PipelineLogsRequest, opts ...grpc.CallOption) (PipelineService_PipelineLogsClient, error) + WatchSteps(ctx context.Context, in *WatchStepRequest, opts ...grpc.CallOption) (PipelineService_WatchStepsClient, error) +} + +type pipelineServiceClient struct { + cc *grpc.ClientConn +} + +func NewPipelineServiceClient(cc *grpc.ClientConn) PipelineServiceClient { + return &pipelineServiceClient{cc} +} + +func (c *pipelineServiceClient) ListPipelines(ctx context.Context, in *ListPipelinesRequest, opts ...grpc.CallOption) (*v1alpha1.PipelineList, error) { + out := new(v1alpha1.PipelineList) + err := c.cc.Invoke(ctx, "/pipeline.PipelineService/ListPipelines", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *pipelineServiceClient) WatchPipelines(ctx context.Context, in *ListPipelinesRequest, opts ...grpc.CallOption) (PipelineService_WatchPipelinesClient, error) { + stream, err := c.cc.NewStream(ctx, &_PipelineService_serviceDesc.Streams[0], "/pipeline.PipelineService/WatchPipelines", opts...) + if err != nil { + return nil, err + } + x := &pipelineServiceWatchPipelinesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type PipelineService_WatchPipelinesClient interface { + Recv() (*PipelineWatchEvent, error) + grpc.ClientStream +} + +type pipelineServiceWatchPipelinesClient struct { + grpc.ClientStream +} + +func (x *pipelineServiceWatchPipelinesClient) Recv() (*PipelineWatchEvent, error) { + m := new(PipelineWatchEvent) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *pipelineServiceClient) GetPipeline(ctx context.Context, in *GetPipelineRequest, opts ...grpc.CallOption) (*v1alpha1.Pipeline, error) { + out := new(v1alpha1.Pipeline) + err := c.cc.Invoke(ctx, "/pipeline.PipelineService/GetPipeline", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *pipelineServiceClient) RestartPipeline(ctx context.Context, in *RestartPipelineRequest, opts ...grpc.CallOption) (*RestartPipelineResponse, error) { + out := new(RestartPipelineResponse) + err := c.cc.Invoke(ctx, "/pipeline.PipelineService/RestartPipeline", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *pipelineServiceClient) DeletePipeline(ctx context.Context, in *DeletePipelineRequest, opts ...grpc.CallOption) (*DeletePipelineResponse, error) { + out := new(DeletePipelineResponse) + err := c.cc.Invoke(ctx, "/pipeline.PipelineService/DeletePipeline", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *pipelineServiceClient) PipelineLogs(ctx context.Context, in *PipelineLogsRequest, opts ...grpc.CallOption) (PipelineService_PipelineLogsClient, error) { + stream, err := c.cc.NewStream(ctx, &_PipelineService_serviceDesc.Streams[1], "/pipeline.PipelineService/PipelineLogs", opts...) + if err != nil { + return nil, err + } + x := &pipelineServicePipelineLogsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type PipelineService_PipelineLogsClient interface { + Recv() (*LogEntry, error) + grpc.ClientStream +} + +type pipelineServicePipelineLogsClient struct { + grpc.ClientStream +} + +func (x *pipelineServicePipelineLogsClient) Recv() (*LogEntry, error) { + m := new(LogEntry) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *pipelineServiceClient) WatchSteps(ctx context.Context, in *WatchStepRequest, opts ...grpc.CallOption) (PipelineService_WatchStepsClient, error) { + stream, err := c.cc.NewStream(ctx, &_PipelineService_serviceDesc.Streams[2], "/pipeline.PipelineService/WatchSteps", opts...) 
+ if err != nil { + return nil, err + } + x := &pipelineServiceWatchStepsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type PipelineService_WatchStepsClient interface { + Recv() (*StepWatchEvent, error) + grpc.ClientStream +} + +type pipelineServiceWatchStepsClient struct { + grpc.ClientStream +} + +func (x *pipelineServiceWatchStepsClient) Recv() (*StepWatchEvent, error) { + m := new(StepWatchEvent) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// PipelineServiceServer is the server API for PipelineService service. +type PipelineServiceServer interface { + ListPipelines(context.Context, *ListPipelinesRequest) (*v1alpha1.PipelineList, error) + WatchPipelines(*ListPipelinesRequest, PipelineService_WatchPipelinesServer) error + GetPipeline(context.Context, *GetPipelineRequest) (*v1alpha1.Pipeline, error) + RestartPipeline(context.Context, *RestartPipelineRequest) (*RestartPipelineResponse, error) + DeletePipeline(context.Context, *DeletePipelineRequest) (*DeletePipelineResponse, error) + PipelineLogs(*PipelineLogsRequest, PipelineService_PipelineLogsServer) error + WatchSteps(*WatchStepRequest, PipelineService_WatchStepsServer) error +} + +// UnimplementedPipelineServiceServer can be embedded to have forward compatible implementations. +type UnimplementedPipelineServiceServer struct { +} + +func (*UnimplementedPipelineServiceServer) ListPipelines(ctx context.Context, req *ListPipelinesRequest) (*v1alpha1.PipelineList, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListPipelines not implemented") +} +func (*UnimplementedPipelineServiceServer) WatchPipelines(req *ListPipelinesRequest, srv PipelineService_WatchPipelinesServer) error { + return status.Errorf(codes.Unimplemented, "method WatchPipelines not implemented") +} +func (*UnimplementedPipelineServiceServer) GetPipeline(ctx context.Context, req *GetPipelineRequest) (*v1alpha1.Pipeline, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPipeline not implemented") +} +func (*UnimplementedPipelineServiceServer) RestartPipeline(ctx context.Context, req *RestartPipelineRequest) (*RestartPipelineResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RestartPipeline not implemented") +} +func (*UnimplementedPipelineServiceServer) DeletePipeline(ctx context.Context, req *DeletePipelineRequest) (*DeletePipelineResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeletePipeline not implemented") +} +func (*UnimplementedPipelineServiceServer) PipelineLogs(req *PipelineLogsRequest, srv PipelineService_PipelineLogsServer) error { + return status.Errorf(codes.Unimplemented, "method PipelineLogs not implemented") +} +func (*UnimplementedPipelineServiceServer) WatchSteps(req *WatchStepRequest, srv PipelineService_WatchStepsServer) error { + return status.Errorf(codes.Unimplemented, "method WatchSteps not implemented") +} + +func RegisterPipelineServiceServer(s *grpc.Server, srv PipelineServiceServer) { + s.RegisterService(&_PipelineService_serviceDesc, srv) +} + +func _PipelineService_ListPipelines_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListPipelinesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(PipelineServiceServer).ListPipelines(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pipeline.PipelineService/ListPipelines", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PipelineServiceServer).ListPipelines(ctx, req.(*ListPipelinesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PipelineService_WatchPipelines_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ListPipelinesRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(PipelineServiceServer).WatchPipelines(m, &pipelineServiceWatchPipelinesServer{stream}) +} + +type PipelineService_WatchPipelinesServer interface { + Send(*PipelineWatchEvent) error + grpc.ServerStream +} + +type pipelineServiceWatchPipelinesServer struct { + grpc.ServerStream +} + +func (x *pipelineServiceWatchPipelinesServer) Send(m *PipelineWatchEvent) error { + return x.ServerStream.SendMsg(m) +} + +func _PipelineService_GetPipeline_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPipelineRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PipelineServiceServer).GetPipeline(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pipeline.PipelineService/GetPipeline", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PipelineServiceServer).GetPipeline(ctx, req.(*GetPipelineRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PipelineService_RestartPipeline_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RestartPipelineRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PipelineServiceServer).RestartPipeline(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pipeline.PipelineService/RestartPipeline", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PipelineServiceServer).RestartPipeline(ctx, req.(*RestartPipelineRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PipelineService_DeletePipeline_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeletePipelineRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PipelineServiceServer).DeletePipeline(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pipeline.PipelineService/DeletePipeline", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PipelineServiceServer).DeletePipeline(ctx, req.(*DeletePipelineRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PipelineService_PipelineLogs_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(PipelineLogsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(PipelineServiceServer).PipelineLogs(m, &pipelineServicePipelineLogsServer{stream}) +} + +type PipelineService_PipelineLogsServer interface { + Send(*LogEntry) error + grpc.ServerStream +} + +type pipelineServicePipelineLogsServer struct { + grpc.ServerStream +} + +func (x 
*pipelineServicePipelineLogsServer) Send(m *LogEntry) error { + return x.ServerStream.SendMsg(m) +} + +func _PipelineService_WatchSteps_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(WatchStepRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(PipelineServiceServer).WatchSteps(m, &pipelineServiceWatchStepsServer{stream}) +} + +type PipelineService_WatchStepsServer interface { + Send(*StepWatchEvent) error + grpc.ServerStream +} + +type pipelineServiceWatchStepsServer struct { + grpc.ServerStream +} + +func (x *pipelineServiceWatchStepsServer) Send(m *StepWatchEvent) error { + return x.ServerStream.SendMsg(m) +} + +var _PipelineService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "pipeline.PipelineService", + HandlerType: (*PipelineServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListPipelines", + Handler: _PipelineService_ListPipelines_Handler, + }, + { + MethodName: "GetPipeline", + Handler: _PipelineService_GetPipeline_Handler, + }, + { + MethodName: "RestartPipeline", + Handler: _PipelineService_RestartPipeline_Handler, + }, + { + MethodName: "DeletePipeline", + Handler: _PipelineService_DeletePipeline_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "WatchPipelines", + Handler: _PipelineService_WatchPipelines_Handler, + ServerStreams: true, + }, + { + StreamName: "PipelineLogs", + Handler: _PipelineService_PipelineLogs_Handler, + ServerStreams: true, + }, + { + StreamName: "WatchSteps", + Handler: _PipelineService_WatchSteps_Handler, + ServerStreams: true, + }, + }, + Metadata: "pkg/apiclient/pipeline/pipeline.proto", +} + +func (m *ListPipelinesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListPipelinesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListPipelinesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ListOptions != nil { + { + size, err := m.ListOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPipeline(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintPipeline(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PipelineWatchEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PipelineWatchEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PipelineWatchEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPipeline(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + 
i = encodeVarintPipeline(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetPipelineRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetPipelineRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetPipelineRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.GetOptions != nil { + { + size, err := m.GetOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPipeline(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintPipeline(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintPipeline(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RestartPipelineRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RestartPipelineRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RestartPipelineRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintPipeline(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintPipeline(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RestartPipelineResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RestartPipelineResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RestartPipelineResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *DeletePipelineRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeletePipelineRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeletePipelineRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.DeleteOptions != nil { + { + size, err := 
m.DeleteOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPipeline(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintPipeline(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintPipeline(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeletePipelineResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeletePipelineResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeletePipelineResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *WatchStepRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchStepRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WatchStepRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ListOptions != nil { + { + size, err := m.ListOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPipeline(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintPipeline(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StepWatchEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StepWatchEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StepWatchEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPipeline(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintPipeline(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PipelineLogsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PipelineLogsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*PipelineLogsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.PodLogOptions != nil { + { + size, err := m.PodLogOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPipeline(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Grep) > 0 { + i -= len(m.Grep) + copy(dAtA[i:], m.Grep) + i = encodeVarintPipeline(dAtA, i, uint64(len(m.Grep))) + i-- + dAtA[i] = 0x22 + } + if len(m.StepName) > 0 { + i -= len(m.StepName) + copy(dAtA[i:], m.StepName) + i = encodeVarintPipeline(dAtA, i, uint64(len(m.StepName))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintPipeline(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintPipeline(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LogEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LogEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Msg) > 0 { + i -= len(m.Msg) + copy(dAtA[i:], m.Msg) + i = encodeVarintPipeline(dAtA, i, uint64(len(m.Msg))) + i-- + dAtA[i] = 0x3a + } + if m.Time != nil { + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPipeline(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if len(m.StepName) > 0 { + i -= len(m.StepName) + copy(dAtA[i:], m.StepName) + i = encodeVarintPipeline(dAtA, i, uint64(len(m.StepName))) + i-- + dAtA[i] = 0x1a + } + if len(m.PipelineName) > 0 { + i -= len(m.PipelineName) + copy(dAtA[i:], m.PipelineName) + i = encodeVarintPipeline(dAtA, i, uint64(len(m.PipelineName))) + i-- + dAtA[i] = 0x12 + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintPipeline(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintPipeline(dAtA []byte, offset int, v uint64) int { + offset -= sovPipeline(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ListPipelinesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovPipeline(uint64(l)) + } + if m.ListOptions != nil { + l = m.ListOptions.Size() + n += 1 + l + sovPipeline(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PipelineWatchEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovPipeline(uint64(l)) + } + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovPipeline(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m 
*GetPipelineRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPipeline(uint64(l)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovPipeline(uint64(l)) + } + if m.GetOptions != nil { + l = m.GetOptions.Size() + n += 1 + l + sovPipeline(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RestartPipelineRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPipeline(uint64(l)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovPipeline(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RestartPipelineResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DeletePipelineRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPipeline(uint64(l)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovPipeline(uint64(l)) + } + if m.DeleteOptions != nil { + l = m.DeleteOptions.Size() + n += 1 + l + sovPipeline(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DeletePipelineResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WatchStepRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovPipeline(uint64(l)) + } + if m.ListOptions != nil { + l = m.ListOptions.Size() + n += 1 + l + sovPipeline(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StepWatchEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovPipeline(uint64(l)) + } + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovPipeline(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PipelineLogsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovPipeline(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPipeline(uint64(l)) + } + l = len(m.StepName) + if l > 0 { + n += 1 + l + sovPipeline(uint64(l)) + } + l = len(m.Grep) + if l > 0 { + n += 1 + l + sovPipeline(uint64(l)) + } + if m.PodLogOptions != nil { + l = m.PodLogOptions.Size() + n += 1 + l + sovPipeline(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LogEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovPipeline(uint64(l)) + } + l = len(m.PipelineName) + if l > 0 { + n += 1 + l + sovPipeline(uint64(l)) + } + l = len(m.StepName) + if l > 0 { + n += 1 + l + sovPipeline(uint64(l)) + } + if m.Time != nil { + l = m.Time.Size() + n += 1 + l + sovPipeline(uint64(l)) + } + l = len(m.Msg) + if l > 0 { + n += 1 + l + sovPipeline(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovPipeline(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozPipeline(x uint64) (n int) { + return 
sovPipeline(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ListPipelinesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListPipelinesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListPipelinesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ListOptions == nil { + m.ListOptions = &v1.ListOptions{} + } + if err := m.ListOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPipeline(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPipeline + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
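+ // unknown field numbers are not discarded: skipPipeline measured the field, its raw bytes were appended to XXX_unrecognized above, and they survive a round-trip through Marshal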
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PipelineWatchEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PipelineWatchEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PipelineWatchEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &v1alpha1.Pipeline{} + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPipeline(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPipeline + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
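+ // after the field loop, iNdEx must land exactly on len(dAtA); overshooting means the last field was truncated, reported below as io.ErrUnexpectedEOF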
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetPipelineRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetPipelineRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetPipelineRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GetOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GetOptions == nil { + m.GetOptions = &v1.GetOptions{} + } + if err := m.GetOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPipeline(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPipeline + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
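+ // the (skippy < 0) || (iNdEx+skippy) < 0 guard above rejects lengths large enough to wrap the index around via integer overflow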
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RestartPipelineRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RestartPipelineRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RestartPipelineRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPipeline(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPipeline + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RestartPipelineResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RestartPipelineResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RestartPipelineResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPipeline(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPipeline + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeletePipelineRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeletePipelineRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeletePipelineRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeleteOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeleteOptions == nil { + m.DeleteOptions = &v1.DeleteOptions{} + } + if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPipeline(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPipeline + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeletePipelineResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeletePipelineResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeletePipelineResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPipeline(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPipeline + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
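+ // DeletePipelineResponse declares no fields of its own, so every incoming tag falls through to this default branch and is retained as an unknown field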
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchStepRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchStepRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchStepRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ListOptions == nil { + m.ListOptions = &v1.ListOptions{} + } + if err := m.ListOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPipeline(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPipeline + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StepWatchEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StepWatchEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StepWatchEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &v1alpha1.Step{} + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPipeline(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPipeline + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
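+ // m.Object (case 2 above) is allocated lazily and reused, so repeated occurrences of the field unmarshal into the same Step, matching protobuf's merge semantics for embedded messages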
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PipelineLogsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PipelineLogsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PipelineLogsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StepName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StepName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Grep", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Grep = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: 
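+ // field 5 is the embedded Kubernetes PodLogOptions message, decoded below like any other length-delimited field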
+ if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodLogOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodLogOptions == nil { + m.PodLogOptions = &v11.PodLogOptions{} + } + if err := m.PodLogOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPipeline(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPipeline + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LogEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PipelineName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PipelineName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StepName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StepName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Time == nil { + m.Time = &v1.Time{} + } + if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPipeline + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPipeline + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPipeline + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Msg = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPipeline(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPipeline + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
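+ // LogEntry has no fields numbered 4 or 5 (the cases jump from 3 to 6), so data sent under those tags lands here and is kept in XXX_unrecognized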
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPipeline(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPipeline + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPipeline + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPipeline + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthPipeline + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupPipeline + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthPipeline + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthPipeline = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPipeline = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupPipeline = fmt.Errorf("proto: unexpected end of group") +) diff --git a/pkg/apiclient/pipeline/pipeline.pb.gw.go b/pkg/apiclient/pipeline/pipeline.pb.gw.go new file mode 100644 index 000000000000..d759ffa6b373 --- /dev/null +++ b/pkg/apiclient/pipeline/pipeline.pb.gw.go @@ -0,0 +1,843 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: pkg/apiclient/pipeline/pipeline.proto + +/* +Package pipeline is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
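+The HTTP method and path for each route are derived from the google.api.http annotations on the RPCs in pipeline.proto.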
+*/ +package pipeline + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +var ( + filter_PipelineService_ListPipelines_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_PipelineService_ListPipelines_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListPipelinesRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_PipelineService_ListPipelines_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ListPipelines(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_PipelineService_ListPipelines_0(ctx context.Context, marshaler runtime.Marshaler, server PipelineServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListPipelinesRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_PipelineService_ListPipelines_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ListPipelines(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_PipelineService_WatchPipelines_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_PipelineService_WatchPipelines_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (PipelineService_WatchPipelinesClient, runtime.ServerMetadata, error) { + var protoReq ListPipelinesRequest + var metadata 
runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_PipelineService_WatchPipelines_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + stream, err := client.WatchPipelines(ctx, &protoReq) + if err != nil { + return nil, metadata, err + } + header, err := stream.Header() + if err != nil { + return nil, metadata, err + } + metadata.HeaderMD = header + return stream, metadata, nil + +} + +var ( + filter_PipelineService_GetPipeline_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0, "name": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} +) + +func request_PipelineService_GetPipeline_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPipelineRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + + protoReq.Name, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_PipelineService_GetPipeline_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetPipeline(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_PipelineService_GetPipeline_0(ctx context.Context, marshaler runtime.Marshaler, server PipelineServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPipelineRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + + protoReq.Name, err = 
runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_PipelineService_GetPipeline_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetPipeline(ctx, &protoReq) + return msg, metadata, err + +} + +func request_PipelineService_RestartPipeline_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RestartPipelineRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + + protoReq.Name, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + } + + msg, err := client.RestartPipeline(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_PipelineService_RestartPipeline_0(ctx context.Context, marshaler runtime.Marshaler, server PipelineServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RestartPipelineRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + + protoReq.Name, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + } + + msg, err := server.RestartPipeline(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_PipelineService_DeletePipeline_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0, "name": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} +) + +func request_PipelineService_DeletePipeline_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeletePipelineRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing 
parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + + protoReq.Name, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_PipelineService_DeletePipeline_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.DeletePipeline(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_PipelineService_DeletePipeline_0(ctx context.Context, marshaler runtime.Marshaler, server PipelineServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeletePipelineRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + + protoReq.Name, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_PipelineService_DeletePipeline_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.DeletePipeline(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_PipelineService_PipelineLogs_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_PipelineService_PipelineLogs_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (PipelineService_PipelineLogsClient, runtime.ServerMetadata, error) { + var protoReq PipelineLogsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, 
filter_PipelineService_PipelineLogs_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + stream, err := client.PipelineLogs(ctx, &protoReq) + if err != nil { + return nil, metadata, err + } + header, err := stream.Header() + if err != nil { + return nil, metadata, err + } + metadata.HeaderMD = header + return stream, metadata, nil + +} + +var ( + filter_PipelineService_WatchSteps_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_PipelineService_WatchSteps_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (PipelineService_WatchStepsClient, runtime.ServerMetadata, error) { + var protoReq WatchStepRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_PipelineService_WatchSteps_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + stream, err := client.WatchSteps(ctx, &protoReq) + if err != nil { + return nil, metadata, err + } + header, err := stream.Header() + if err != nil { + return nil, metadata, err + } + metadata.HeaderMD = header + return stream, metadata, nil + +} + +// RegisterPipelineServiceHandlerServer registers the http handlers for service PipelineService to "mux". +// UnaryRPC :call PipelineServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterPipelineServiceHandlerFromEndpoint instead. +func RegisterPipelineServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server PipelineServiceServer) error { + + mux.Handle("GET", pattern_PipelineService_ListPipelines_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_PipelineService_ListPipelines_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_PipelineService_ListPipelines_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_PipelineService_WatchPipelines_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") + _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + }) + + mux.Handle("GET", pattern_PipelineService_GetPipeline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_PipelineService_GetPipeline_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_PipelineService_GetPipeline_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_PipelineService_RestartPipeline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_PipelineService_RestartPipeline_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_PipelineService_RestartPipeline_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("DELETE", pattern_PipelineService_DeletePipeline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_PipelineService_DeletePipeline_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_PipelineService_DeletePipeline_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_PipelineService_PipelineLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") + _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + }) + + mux.Handle("GET", pattern_PipelineService_WatchSteps_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") + _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + }) + + return nil +} + +// RegisterPipelineServiceHandlerFromEndpoint is same as RegisterPipelineServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterPipelineServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterPipelineServiceHandler(ctx, mux, conn) +} + +// RegisterPipelineServiceHandler registers the http handlers for service PipelineService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterPipelineServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterPipelineServiceHandlerClient(ctx, mux, NewPipelineServiceClient(conn)) +} + +// RegisterPipelineServiceHandlerClient registers the http handlers for service PipelineService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "PipelineServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "PipelineServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "PipelineServiceClient" to call the correct interceptors. 
+func RegisterPipelineServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client PipelineServiceClient) error { + + mux.Handle("GET", pattern_PipelineService_ListPipelines_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_PipelineService_ListPipelines_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_PipelineService_ListPipelines_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_PipelineService_WatchPipelines_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_PipelineService_WatchPipelines_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_PipelineService_WatchPipelines_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_PipelineService_GetPipeline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_PipelineService_GetPipeline_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_PipelineService_GetPipeline_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_PipelineService_RestartPipeline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_PipelineService_RestartPipeline_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_PipelineService_RestartPipeline_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("DELETE", pattern_PipelineService_DeletePipeline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_PipelineService_DeletePipeline_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_PipelineService_DeletePipeline_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_PipelineService_PipelineLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_PipelineService_PipelineLogs_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_PipelineService_PipelineLogs_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_PipelineService_WatchSteps_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_PipelineService_WatchSteps_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_PipelineService_WatchSteps_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_PipelineService_ListPipelines_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "pipelines", "namespace"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_PipelineService_WatchPipelines_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "stream", "pipelines", "namespace"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_PipelineService_GetPipeline_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "pipelines", "namespace", "name"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_PipelineService_RestartPipeline_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"api", "v1", "pipelines", "namespace", "name", "restart"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_PipelineService_DeletePipeline_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "pipelines", "namespace", "name"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_PipelineService_PipelineLogs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"api", "v1", "stream", "pipelines", "namespace", "logs"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_PipelineService_WatchSteps_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "stream", "steps", "namespace"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_PipelineService_ListPipelines_0 = runtime.ForwardResponseMessage + + forward_PipelineService_WatchPipelines_0 = runtime.ForwardResponseStream + + forward_PipelineService_GetPipeline_0 = runtime.ForwardResponseMessage + + forward_PipelineService_RestartPipeline_0 = runtime.ForwardResponseMessage + + forward_PipelineService_DeletePipeline_0 = runtime.ForwardResponseMessage + + forward_PipelineService_PipelineLogs_0 = runtime.ForwardResponseStream + + forward_PipelineService_WatchSteps_0 = runtime.ForwardResponseStream +) diff --git a/pkg/apiclient/pipeline/pipeline.proto b/pkg/apiclient/pipeline/pipeline.proto new file mode 100644 index 000000000000..3ec84ef13482 --- /dev/null +++ b/pkg/apiclient/pipeline/pipeline.proto @@ -0,0 +1,97 @@ +syntax = "proto3"; +option go_package = "github.com/argoproj/argo-workflows/pkg/apiclient/pipeline"; + +import "google/api/annotations.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/api/core/v1/generated.proto"; +import "github.com/argoproj-labs/argo-dataflow/api/v1alpha1/generated.proto"; + +package pipeline; + +message ListPipelinesRequest { + string namespace = 1; + k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions listOptions = 2; +} + +message PipelineWatchEvent { + string type = 1; + github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Pipeline object = 2; +} + +message GetPipelineRequest { + string name = 1; + string namespace = 2; + k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions getOptions = 3; +} + +message RestartPipelineRequest { + string name = 1; + string namespace = 2; +} + +message RestartPipelineResponse { +} + +message DeletePipelineRequest { + string name = 1; + string namespace = 2; + k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 3; +} + +message DeletePipelineResponse { +} + 
+message WatchStepRequest { + string namespace = 1; + k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions listOptions = 2; +} + +message StepWatchEvent { + string type = 1; + github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Step object = 2; +} + +message PipelineLogsRequest { + string namespace = 1; + // optional - only return entries for this pipeline + string name = 2; + // optional - only return entries for this step + string stepName = 3; + // optional - only return entries which match this expression + string grep = 4; + k8s.io.api.core.v1.PodLogOptions podLogOptions = 5; +} + +// structured log entry +message LogEntry { + string namespace = 1; + string pipelineName = 2; + string stepName = 3; + k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 6; + string msg = 7; +} + + +service PipelineService { + rpc ListPipelines (ListPipelinesRequest) returns (github.com.argoproj_labs.argo_dataflow.api.v1alpha1.PipelineList) { + option (google.api.http).get = "/api/v1/pipelines/{namespace}"; + } + rpc WatchPipelines (ListPipelinesRequest) returns (stream PipelineWatchEvent) { + option (google.api.http).get = "/api/v1/stream/pipelines/{namespace}"; + } + rpc GetPipeline (GetPipelineRequest) returns (github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Pipeline) { + option (google.api.http).get = "/api/v1/pipelines/{namespace}/{name}"; + } + rpc RestartPipeline (RestartPipelineRequest) returns (RestartPipelineResponse) { + option (google.api.http).post = "/api/v1/pipelines/{namespace}/{name}/restart"; + } + rpc DeletePipeline (DeletePipelineRequest) returns (DeletePipelineResponse) { + option (google.api.http).delete = "/api/v1/pipelines/{namespace}/{name}"; + } + rpc PipelineLogs (PipelineLogsRequest) returns (stream LogEntry) { + option (google.api.http).get = "/api/v1/stream/pipelines/{namespace}/logs"; + } + rpc WatchSteps (WatchStepRequest) returns (stream StepWatchEvent) { + option (google.api.http).get = "/api/v1/stream/steps/{namespace}"; + } +} diff --git a/server/apiserver/argoserver.go b/server/apiserver/argoserver.go index 8ea14a07e477..04ed62b6c50d 100644 --- a/server/apiserver/argoserver.go +++ b/server/apiserver/argoserver.go @@ -30,6 +30,7 @@ import ( eventpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/event" eventsourcepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/eventsource" infopkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/info" + pipelinepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/pipeline" sensorpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sensor" workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow" workflowarchivepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowarchive" @@ -44,6 +45,7 @@ import ( "github.com/argoproj/argo-workflows/v3/server/event" "github.com/argoproj/argo-workflows/v3/server/eventsource" "github.com/argoproj/argo-workflows/v3/server/info" + pipeline "github.com/argoproj/argo-workflows/v3/server/pipeline" "github.com/argoproj/argo-workflows/v3/server/sensor" "github.com/argoproj/argo-workflows/v3/server/static" "github.com/argoproj/argo-workflows/v3/server/types" @@ -259,6 +261,7 @@ func (as *argoServer) newGRPCServer(instanceIDService instanceid.Service, offloa infopkg.RegisterInfoServiceServer(grpcServer, info.NewInfoServer(as.managedNamespace, links)) eventpkg.RegisterEventServiceServer(grpcServer, eventServer) eventsourcepkg.RegisterEventSourceServiceServer(grpcServer, eventsource.NewEventSourceServer()) + pipelinepkg.RegisterPipelineServiceServer(grpcServer,
pipeline.NewPipelineServer()) sensorpkg.RegisterSensorServiceServer(grpcServer, sensor.NewSensorServer()) workflowpkg.RegisterWorkflowServiceServer(grpcServer, workflow.NewWorkflowServer(instanceIDService, offloadNodeStatusRepo)) workflowtemplatepkg.RegisterWorkflowTemplateServiceServer(grpcServer, workflowtemplate.NewWorkflowTemplateServer(instanceIDService)) @@ -306,6 +309,7 @@ func (as *argoServer) newHTTPServer(ctx context.Context, port int, artifactServe mustRegisterGWHandler(eventpkg.RegisterEventServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts) mustRegisterGWHandler(eventsourcepkg.RegisterEventSourceServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts) mustRegisterGWHandler(sensorpkg.RegisterSensorServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts) + mustRegisterGWHandler(pipelinepkg.RegisterPipelineServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts) mustRegisterGWHandler(workflowpkg.RegisterWorkflowServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts) mustRegisterGWHandler(workflowtemplatepkg.RegisterWorkflowTemplateServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts) mustRegisterGWHandler(cronworkflowpkg.RegisterCronWorkflowServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts) diff --git a/server/auth/gatekeeper.go b/server/auth/gatekeeper.go index c3fd21404d4a..827584cd51c1 100644 --- a/server/auth/gatekeeper.go +++ b/server/auth/gatekeeper.go @@ -18,6 +18,7 @@ import ( "google.golang.org/grpc/status" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -34,6 +35,7 @@ import ( type ContextKey string const ( + DynamicKey ContextKey = "dynamic.Interface" WfKey ContextKey = "workflow.Interface" SensorKey ContextKey = "sensor.Interface" EventSourceKey ContextKey = "eventsource.Interface" @@ -96,6 +98,7 @@ func (s *gatekeeper) Context(ctx context.Context) (context.Context, error) { if err != nil { return nil, err } + ctx = context.WithValue(ctx, DynamicKey, clients.Dynamic) ctx = context.WithValue(ctx, WfKey, clients.Workflow) ctx = context.WithValue(ctx, EventSourceKey, clients.EventSource) ctx = context.WithValue(ctx, SensorKey, clients.Sensor) @@ -104,6 +107,10 @@ func (s *gatekeeper) Context(ctx context.Context) (context.Context, error) { return ctx, nil } +func GetDynamicClient(ctx context.Context) dynamic.Interface { + return ctx.Value(DynamicKey).(dynamic.Interface) +} + func GetWfClient(ctx context.Context) workflow.Interface { return ctx.Value(WfKey).(workflow.Interface) } @@ -254,6 +261,10 @@ func DefaultClientForAuthorization(authorization string) (*rest.Config, *servert if err != nil { return nil, nil, fmt.Errorf("failed to create REST config: %w", err) } + dynamicClient, err := dynamic.NewForConfig(restConfig) + if err != nil { + return nil, nil, fmt.Errorf("failure to create dynamic client: %w", err) + } wfClient, err := workflow.NewForConfig(restConfig) if err != nil { return nil, nil, fmt.Errorf("failure to create workflow client: %w", err) @@ -270,5 +281,11 @@ func DefaultClientForAuthorization(authorization string) (*rest.Config, *servert if err != nil { return nil, nil, fmt.Errorf("failure to create kubernetes client: %w", err) } - return restConfig, &servertypes.Clients{Workflow: wfClient, EventSource: eventSourceClient, Sensor: sensorClient, Kubernetes: kubeClient}, nil + return restConfig, &servertypes.Clients{ + Dynamic: dynamicClient, + Workflow: wfClient, + Sensor: sensorClient, + EventSource: 
eventSourceClient, + Kubernetes: kubeClient, + }, nil } diff --git a/server/eventsource/event_source_server.go b/server/eventsource/event_source_server.go index cdce916fbb43..9f869229ae8d 100644 --- a/server/eventsource/event_source_server.go +++ b/server/eventsource/event_source_server.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "io" - "regexp" esv1 "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" corev1 "k8s.io/api/core/v1" @@ -70,14 +69,11 @@ func (e *eventSourceServer) EventSourcesLogs(in *eventsourcepkg.EventSourcesLogs if in.Name != "" { labelSelector += "=" + in.Name } - grep, err := regexp.Compile(in.Grep) - if err != nil { - return err - } return logs.LogPods( svr.Context(), in.Namespace, labelSelector, + in.Grep, in.PodLogOptions, func(pod *corev1.Pod, data []byte) error { now := metav1.Now() @@ -95,9 +91,6 @@ func (e *eventSourceServer) EventSourcesLogs(in *eventsourcepkg.EventSourcesLogs if in.EventName != "" && in.EventName != e.EventName { return nil } - if !grep.MatchString(e.Msg) { - return nil - } return svr.Send(e) }, ) diff --git a/server/pipeline/pipeline_server.go b/server/pipeline/pipeline_server.go new file mode 100644 index 000000000000..fae785fc0107 --- /dev/null +++ b/server/pipeline/pipeline_server.go @@ -0,0 +1,179 @@ +package pipeline + +import ( + "context" + "io" + + dfv1 "github.com/argoproj-labs/argo-dataflow/api/v1alpha1" + corev1 "k8s.io/api/core/v1" + apierr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + + pipelinepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/pipeline" + "github.com/argoproj/argo-workflows/v3/server/auth" + "github.com/argoproj/argo-workflows/v3/util/logs" +) + +type server struct{} + +func (s *server) ListPipelines(ctx context.Context, req *pipelinepkg.ListPipelinesRequest) (*dfv1.PipelineList, error) { + client := auth.GetDynamicClient(ctx) + opts := metav1.ListOptions{} + if req.ListOptions != nil { + opts = *req.ListOptions + } + list, err := client.Resource(dfv1.PipelineGroupVersionResource).Namespace(req.Namespace).List(ctx, opts) + if err != nil { + return nil, err + } + items := make([]dfv1.Pipeline, len(list.Items)) + for i, un := range list.Items { + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, &items[i]); err != nil { + return nil, err + } + } + return &dfv1.PipelineList{Items: items}, nil +} + +func (s *server) WatchPipelines(req *pipelinepkg.ListPipelinesRequest, svr pipelinepkg.PipelineService_WatchPipelinesServer) error { + ctx := svr.Context() + client := auth.GetDynamicClient(ctx) + opts := metav1.ListOptions{} + if req.ListOptions != nil { + opts = *req.ListOptions + } + watcher, err := client.Resource(dfv1.PipelineGroupVersionResource).Namespace(req.Namespace).Watch(ctx, opts) + if err != nil { + return err + } + defer watcher.Stop() + for { + select { + case <-ctx.Done(): + return ctx.Err() + case event, open := <-watcher.ResultChan(): + if !open { + return io.EOF + } + un, ok := event.Object.(*unstructured.Unstructured) + if !ok { + return apierr.FromObject(event.Object) + } + pl := &dfv1.Pipeline{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, pl); err != nil { + return err + } + if err := svr.Send(&pipelinepkg.PipelineWatchEvent{Type: string(event.Type), Object: pl}); err != nil { + return err + } + } + } +} + +func (s *server) GetPipeline(ctx context.Context, req 
*pipelinepkg.GetPipelineRequest) (*dfv1.Pipeline, error) { + client := auth.GetDynamicClient(ctx) + opts := metav1.GetOptions{} + if req.GetOptions != nil { + opts = *req.GetOptions + } + un, err := client.Resource(dfv1.PipelineGroupVersionResource).Namespace(req.Namespace).Get(ctx, req.Name, opts) + if err != nil { + return nil, err + } + item := &dfv1.Pipeline{} + return item, runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, item) +} + +func (s *server) RestartPipeline(ctx context.Context, req *pipelinepkg.RestartPipelineRequest) (*pipelinepkg.RestartPipelineResponse, error) { + client := auth.GetKubeClient(ctx) + err := client.CoreV1().Pods(req.Namespace).DeleteCollection( + ctx, + metav1.DeleteOptions{}, + metav1.ListOptions{LabelSelector: dfv1.KeyPipelineName + "=" + req.Name}, + ) + if err != nil { + return nil, err + } + return &pipelinepkg.RestartPipelineResponse{}, nil +} + +func (s *server) DeletePipeline(ctx context.Context, req *pipelinepkg.DeletePipelineRequest) (*pipelinepkg.DeletePipelineResponse, error) { + client := auth.GetDynamicClient(ctx) + opts := metav1.DeleteOptions{} + if req.DeleteOptions != nil { + opts = *req.DeleteOptions + } + err := client.Resource(dfv1.PipelineGroupVersionResource).Namespace(req.Namespace).Delete(ctx, req.Name, opts) + if err != nil { + return nil, err + } + return &pipelinepkg.DeletePipelineResponse{}, nil +} + +func (s *server) PipelineLogs(in *pipelinepkg.PipelineLogsRequest, svr pipelinepkg.PipelineService_PipelineLogsServer) error { + labelSelector := dfv1.KeyPipelineName + if in.Name != "" { + labelSelector += "=" + in.Name + } + if in.StepName != "" { + labelSelector += "," + dfv1.KeyStepName + "=" + in.StepName + } + return logs.LogPods( + svr.Context(), + in.Namespace, + labelSelector, + in.Grep, + in.PodLogOptions, + func(pod *corev1.Pod, data []byte) error { + now := metav1.Now() + return svr.Send(&pipelinepkg.LogEntry{ + Namespace: pod.Namespace, + PipelineName: pod.Labels[dfv1.KeyPipelineName], + StepName: pod.Labels[dfv1.KeyStepName], + Time: &now, + Msg: string(data), + }) + }, + ) +} + +func (s *server) WatchSteps(req *pipelinepkg.WatchStepRequest, svr pipelinepkg.PipelineService_WatchStepsServer) error { + ctx := svr.Context() + client := auth.GetDynamicClient(ctx) + opts := metav1.ListOptions{} + if req.ListOptions != nil { + opts = *req.ListOptions + } + watcher, err := client.Resource(dfv1.StepGroupVersionResource).Namespace(req.Namespace).Watch(ctx, opts) + if err != nil { + return err + } + defer watcher.Stop() + for { + select { + case <-ctx.Done(): + return ctx.Err() + case event, open := <-watcher.ResultChan(): + if !open { + return io.EOF + } + un, ok := event.Object.(*unstructured.Unstructured) + if !ok { + return apierr.FromObject(event.Object) + } + step := &dfv1.Step{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, step); err != nil { + return err + } + if err := svr.Send(&pipelinepkg.StepWatchEvent{Type: string(event.Type), Object: step}); err != nil { + return err + } + } + } +} + +func NewPipelineServer() pipelinepkg.PipelineServiceServer { + return &server{} +} diff --git a/server/sensor/sensor_server.go b/server/sensor/sensor_server.go index 70a6cd03b4b6..e91cb2b18758 100644 --- a/server/sensor/sensor_server.go +++ b/server/sensor/sensor_server.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "io" - "regexp" sv1 "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" corev1 "k8s.io/api/core/v1" @@ -55,14 +54,11 @@ func (s *sensorServer) SensorsLogs(in 
*sensorpkg.SensorsLogsRequest, svr sensorp if in.Name != "" { labelSelector += "=" + in.Name } - grep, err := regexp.Compile(in.Grep) - if err != nil { - return err - } return logs.LogPods( svr.Context(), in.Namespace, labelSelector, + in.Grep, in.PodLogOptions, func(pod *corev1.Pod, data []byte) error { now := metav1.Now() @@ -77,9 +73,6 @@ func (s *sensorServer) SensorsLogs(in *sensorpkg.SensorsLogsRequest, svr sensorp if in.TriggerName != "" && in.TriggerName != e.TriggerName { return nil } - if !grep.MatchString(e.Msg) { - return nil - } return svr.Send(e) }, ) diff --git a/server/types/clients.go b/server/types/clients.go index dfed8f59072e..d9033d82371d 100644 --- a/server/types/clients.go +++ b/server/types/clients.go @@ -3,12 +3,14 @@ package types import ( eventsource "github.com/argoproj/argo-events/pkg/client/eventsource/clientset/versioned" sensor "github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned" + "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" workflow "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned" ) type Clients struct { + Dynamic dynamic.Interface Workflow workflow.Interface Sensor sensor.Interface EventSource eventsource.Interface diff --git a/test/e2e/argo_server_test.go b/test/e2e/argo_server_test.go index 9b45d976e0ef..6a8f2c2a3c00 100644 --- a/test/e2e/argo_server_test.go +++ b/test/e2e/argo_server_test.go @@ -1642,6 +1642,20 @@ func (s *ArgoServerSuite) TestEventSourcesService() { }) } +func (s *ArgoServerSuite) TestPipelineService() { + s.T().SkipNow() + s.Run("GetPipeline", func() { + s.e().GET("/api/v1/pipelines/argo/not-exists"). + Expect(). + Status(404) + }) + s.Run("ListPipelines", func() { + s.e().GET("/api/v1/pipelines/argo"). + Expect(). + Status(200) + }) +} + func (s *ArgoServerSuite) TestSensorService() { s.Run("CreateSensor", func() { s.e().POST("/api/v1/sensors/argo"). 
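The routes wired up above are plain REST endpoints on the Argo Server, which is what the (currently skipped) TestPipelineService e2e test exercises. For orientation, here is a minimal client sketch in TypeScript; the base URL, the bearer token, and the newline-delimited JSON framing of the streaming endpoint are assumptions about the server's conventions, not something this diff itself pins down:

// Sketch only: the paths come from the google.api.http bindings in pipeline.proto above.
const baseUrl = 'http://localhost:2746'; // assumed local argo-server address
const headers = {Authorization: 'Bearer <token>'}; // placeholder token

// GET /api/v1/pipelines/{namespace} — ListPipelines
async function listPipelines(namespace: string) {
    const resp = await fetch(`${baseUrl}/api/v1/pipelines/${namespace}`, {headers});
    if (!resp.ok) {
        throw new Error(`list pipelines failed: ${resp.status}`);
    }
    return resp.json(); // a dataflow PipelineList
}

// POST /api/v1/pipelines/{namespace}/{name}/restart — RestartPipeline
// (the server implements this by deleting the pipeline's pods)
async function restartPipeline(namespace: string, name: string) {
    const resp = await fetch(`${baseUrl}/api/v1/pipelines/${namespace}/${name}/restart`, {method: 'POST', headers});
    if (!resp.ok) {
        throw new Error(`restart failed: ${resp.status}`);
    }
}

// GET /api/v1/stream/pipelines/{namespace}/logs — PipelineLogs.
// Request fields not bound in the path (name, stepName, grep, podLogOptions)
// are populated from query parameters by the generated gateway code above.
async function pipelineLogs(namespace: string, name: string) {
    const url = `${baseUrl}/api/v1/stream/pipelines/${namespace}/logs?` + new URLSearchParams({name});
    const resp = await fetch(url, {headers});
    const reader = resp.body!.getReader();
    const decoder = new TextDecoder();
    let buffer = '';
    for (let r = await reader.read(); !r.done; r = await reader.read()) {
        buffer += decoder.decode(r.value, {stream: true});
        const lines = buffer.split('\n');
        buffer = lines.pop()!; // keep a partial trailing line for the next chunk
        lines.filter(l => l).forEach(l => console.log(JSON.parse(l))); // assumed: one LogEntry per line
    }
}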
diff --git a/ui/package.json b/ui/package.json index 4d0f85f32ff1..7d2158e57430 100644 --- a/ui/package.json +++ b/ui/package.json @@ -40,7 +40,7 @@ "devDependencies": { "@babel/core": "^7.0.0-0", "@babel/preset-env": "^7.12.1", - "@fortawesome/fontawesome-free": "^5.12.0", + "@fortawesome/fontawesome-free": "^5.15.3", "@types/chart.js": "^2.9.24", "@types/classnames": "^2.2.3", "@types/dagre": "^0.7.44", diff --git a/ui/src/app/app-router.tsx b/ui/src/app/app-router.tsx index e9be83a0a072..7b586d486bfb 100644 --- a/ui/src/app/app-router.tsx +++ b/ui/src/app/app-router.tsx @@ -13,6 +13,7 @@ import eventflow from './event-flow'; import eventSources from './event-sources'; import help from './help'; import login from './login'; +import pipelines from './pipelines'; import reports from './reports'; import sensors from './sensors'; import {uiUrl} from './shared/base'; @@ -28,6 +29,7 @@ import workflows from './workflows'; const eventFlowUrl = uiUrl('event-flow'); const sensorUrl = uiUrl('sensors'); +const pipelinesUrl = uiUrl('pipelines'); const workflowsUrl = uiUrl('workflows'); const workflowsEventBindingsUrl = uiUrl('workflow-event-bindings'); const workflowTemplatesUrl = uiUrl('workflow-templates'); @@ -98,6 +100,11 @@ export const AppRouter = ({popupManager, history, notificationsManager}: {popupM path: cronWorkflowsUrl + namespaceSuffix, iconClassName: 'fa fa-clock' }, + { + title: 'Pipelines', + path: pipelinesUrl + '/' + namespace, + iconClassName: 'fa fa-wind' + }, { title: 'Event Flow', path: eventFlowUrl + namespaceSuffix, @@ -152,6 +159,7 @@ export const AppRouter = ({popupManager, history, notificationsManager}: {popupM + <Route path={pipelinesUrl} component={pipelines.component}/> diff --git a/ui/src/app/pipelines/components/pipeline-container.tsx b/ui/src/app/pipelines/components/pipeline-container.tsx new file mode 100644 index 000000000000..51fb5afdc896 --- /dev/null +++ b/ui/src/app/pipelines/components/pipeline-container.tsx @@ -0,0 +1,11 @@ +import * as React from 'react'; +import {Route, RouteComponentProps, Switch} from 'react-router'; +import {PipelineDetails} from './pipeline-details/pipeline-details'; +import {PipelineList} from './pipeline-list/pipeline-list'; + +export const PipelineContainer = (props: RouteComponentProps<any>) => ( + <Switch> + <Route exact={true} path={props.match.path + '/:namespace?'} component={PipelineList} /> + <Route exact={true} path={props.match.path + '/:namespace/:name'} component={PipelineDetails} /> + </Switch> +); diff --git a/ui/src/app/pipelines/components/pipeline-details/pipeline-details.tsx b/ui/src/app/pipelines/components/pipeline-details/pipeline-details.tsx new file mode 100644 index 000000000000..182a12191234 --- /dev/null +++ b/ui/src/app/pipelines/components/pipeline-details/pipeline-details.tsx @@ -0,0 +1,172 @@ +import {NotificationType, Page} from 'argo-ui'; +import * as React from 'react'; +import {useContext, useEffect, useState} from 'react'; +import {RouteComponentProps} from 'react-router'; +import {Pipeline} from '../../../../models/pipeline'; +import {Step} from '../../../../models/step'; +import {uiUrl} from '../../../shared/base'; +import {ErrorNotice} from '../../../shared/components/error-notice'; +import {GraphPanel} from '../../../shared/components/graph/graph-panel'; +import {Loading} from '../../../shared/components/loading'; +import {Context} from '../../../shared/context'; +import {historyUrl} from '../../../shared/history'; +import {ListWatch} from '../../../shared/list-watch'; +import {services} from '../../../shared/services'; +import {StepSidePanel} from '../step-side-panel'; +import {graph} from './pipeline-graph'; + +require('./pipeline.scss'); + +export const PipelineDetails = ({history, match, location}: RouteComponentProps<any>) => { + const {notifications,
navigation, popup} = useContext(Context); + const queryParams = new URLSearchParams(location.search); + // state for URL and query parameters + const namespace = match.params.namespace; + const name = match.params.name; + + const [tab, setTab] = useState(queryParams.get('tab')); + const [selectedStep, selectStep] = useState(queryParams.get('selectedStep')); + + useEffect( + () => + history.push( + historyUrl('pipelines/{namespace}/{name}', { + namespace, + name, + selectedStep, + tab + }) + ), + [namespace, name, selectedStep, tab] + ); + + const [error, setError] = useState(); + const [pipeline, setPipeline] = useState(); + const [steps, setSteps] = useState([]); + + useEffect(() => { + services.pipeline + .getPipeline(namespace, name) + .then(setPipeline) + .then(() => setError(null)) + .catch(setError); + const w = new ListWatch( + () => Promise.resolve({metadata: {}, items: []}), + () => services.pipeline.watchSteps(namespace, ['dataflow.argoproj.io/pipeline-name=' + name]), + () => setError(null), + () => setError(null), + items => setSteps([...items]), + setError + ); + w.start(); + return () => w.stop(); + }, [name, namespace]); + + const step = steps.find(s => s.spec.name === selectedStep); + return ( + { + services.pipeline + .restartPipeline(namespace, name) + .then(() => setError(null)) + .then(() => + notifications.show({type: NotificationType.Success, content: 'Your pipeline pods should terminate within ~30s, before being re-created'}) + ) + .catch(setError); + } + }, + { + title: 'Delete', + iconClassName: 'fa fa-trash', + action: () => { + popup.confirm('confirm', 'Are you sure you want to delete this pipeline?').then(yes => { + if (yes) { + services.pipeline + .deletePipeline(namespace, name) + .then(() => navigation.goto(uiUrl('pipelines/' + namespace))) + .then(() => setError(null)) + .catch(setError); + } + }); + } + } + ] + } + }}> + <> + + {!pipeline ? 
( + + ) : ( + <> + { + if (node.startsWith('step/')) { + selectStep(node.replace('step/', '')); + } + }} + /> + {step && ( + selectStep(null)} + /> + )} + + )} + + + ); +}; diff --git a/ui/src/app/pipelines/components/pipeline-details/pipeline-graph.ts b/ui/src/app/pipelines/components/pipeline-details/pipeline-graph.ts new file mode 100644 index 000000000000..38a6cd0bdf69 --- /dev/null +++ b/ui/src/app/pipelines/components/pipeline-details/pipeline-graph.ts @@ -0,0 +1,160 @@ +import {Pipeline} from '../../../../models/pipeline'; +import {Metrics, Step} from '../../../../models/step'; +import {Graph} from '../../../shared/components/graph/types'; +import {Icon} from '../../../shared/components/icon'; +import {totalRate} from '../total-rate'; + +type Type = '' | 'cat' | 'code' | 'container' | 'dedupe' | 'expand' | 'filter' | 'flatten' | 'git' | 'group' | 'map' | 'split'; + +const stepIcon = (type: Type): Icon => { + switch (type) { + case 'cat': + case 'map': + return 'chevron-right'; + case 'code': + return 'code'; + case 'container': + return 'cube'; + case 'dedupe': + return 'filter'; + case 'expand': + return 'expand'; + case 'filter': + return 'filter'; + case 'flatten': + return 'compress'; + case 'git': + return 'git-alt'; + case 'group': + return 'object-group'; + case 'split': + return 'object-ungroup'; + default: + return 'square'; + } +}; + +const pendingSymbol = '◷'; + +function formatRates(metrics: Metrics, replicas: number): string { + const rates = Object.entries(metrics || {}) + // the rate will remain after scale-down, so we must filter out, as it'll be wrong + .filter(([replica]) => parseInt(replica, 10) < replicas); + return rates.length > 0 ? 'Δ' + totalRate(metrics, replicas) : ''; +} + +function formatPending(pending: number) { + return pending ? ' ' + pendingSymbol + pending.toLocaleString() + ' ' : ''; +} + +export const graph = (pipeline: Pipeline, steps: Step[]) => { + const g = new Graph(); + + steps.forEach(step => { + const spec = step.spec; + const stepId = 'step/' + spec.name; + const status = step.status || {phase: '', replicas: 0}; + + const type: Type = spec.cat + ? 'cat' + : spec.code + ? 'code' + : spec.container + ? 'container' + : spec.dedupe + ? 'dedupe' + : spec.expand + ? 'expand' + : spec.filter + ? 'filter' + : spec.git + ? 'git' + : spec.flatten + ? 'flatten' + : spec.group + ? 'group' + : spec.map + ? 'map' + : ''; + + const nodeLabel = status.replicas !== 1 ? spec.name + ' (x' + (status.replicas || 0) + ')' : spec.name; + g.nodes.set(stepId, {genre: type, label: nodeLabel, icon: stepIcon(type), classNames: status.phase}); + + const classNames = status.phase === 'Running' ? 
'flow' : ''; + (spec.sources || []).forEach(x => { + const ss = (status.sourceStatuses || {})[x.name || ''] || {}; + const label = formatPending(ss.pending) + formatRates(ss.metrics, step.status.replicas); + if (x.cron) { + const cronId = 'cron/' + stepId + '/sources/' + x.cron.schedule; + g.nodes.set(cronId, {genre: 'cron', icon: 'clock', label: x.cron.schedule}); + g.edges.set({v: cronId, w: stepId}, {classNames, label}); + } else if (x.db) { + const id = 'db/' + stepId + '/sources/' + x.name; + g.nodes.set(id, {genre: 'db', icon: 'database', label: x.name}); + g.edges.set({v: id, w: stepId}, {classNames, label}); + } else if (x.kafka) { + const kafkaId = x.kafka.name || x.kafka.url || 'default'; + const topicId = 'kafka/' + kafkaId + '/' + x.kafka.topic; + g.nodes.set(topicId, {genre: 'kafka', icon: 'stream', label: x.kafka.topic}); + g.edges.set({v: topicId, w: stepId}, {classNames, label}); + } else if (x.stan) { + const stanId = x.stan.name || x.stan.url || 'default'; + const subjectId = 'stan/' + stanId + '/' + x.stan.subject; + g.nodes.set(subjectId, {genre: 'stan', icon: 'stream', label: x.stan.subject}); + g.edges.set({v: subjectId, w: stepId}, {classNames, label}); + } else if (x.http) { + const y = new URL('http://' + (x.http.serviceName || pipeline.metadata.name + '-' + step.spec.name) + '/sources/' + x.name); + const subjectId = 'http/' + y; + g.nodes.set(subjectId, {genre: 'http', icon: 'cloud', label: y.hostname}); + g.edges.set({v: subjectId, w: stepId}, {classNames, label}); + } else if (x.s3) { + const bucket = x.s3.bucket; + const id = 's3/' + bucket; + g.nodes.set(id, {genre: 's3', icon: 'hdd', label: bucket}); + g.edges.set({v: id, w: stepId}, {classNames, label}); + } else { + const id = 'unknown/' + stepId + '/sources/' + x.name; + g.nodes.set(id, {genre: 'unknown', icon: 'square', label: x.name}); + g.edges.set({v: id, w: stepId}, {classNames, label}); + } + }); + (spec.sinks || []).forEach(x => { + const ss = (status.sinkStatuses || {})[x.name || ''] || {}; + const label = formatRates(ss.metrics, step.status.replicas); + if (x.db) { + const id = 'db/' + stepId + '/sinks/' + x.name; + g.nodes.set(id, {genre: 'db', icon: 'database', label: x.name}); + g.edges.set({v: stepId, w: id}, {classNames, label}); + } else if (x.kafka) { + const kafkaId = x.kafka.name || x.kafka.url || 'default'; + const topicId = 'kafka/' + kafkaId + '/' + x.kafka.topic; + g.nodes.set(topicId, {genre: 'kafka', icon: 'stream', label: x.kafka.topic}); + g.edges.set({v: stepId, w: topicId}, {classNames, label}); + } else if (x.log) { + const logId = 'log/' + stepId + '/sinks/' + x.name; + g.nodes.set(logId, {genre: 'log', icon: 'file-alt', label: 'log'}); + g.edges.set({v: stepId, w: logId}, {classNames, label}); + } else if (x.stan) { + const stanId = x.stan.name || x.stan.url || 'default'; + const subjectId = 'stan/' + stanId + '/' + x.stan.subject; + g.nodes.set(subjectId, {genre: 'stan', icon: 'stream', label: x.stan.subject}); + g.edges.set({v: stepId, w: subjectId}, {classNames, label}); + } else if (x.http) { + const y = new URL(x.http.url); + const subjectId = 'http/' + y; + g.nodes.set(subjectId, {genre: 'http', icon: 'cloud', label: y.hostname}); + g.edges.set({v: stepId, w: subjectId}, {classNames, label}); + } else if (x.s3) { + const bucket = x.s3.bucket; + const id = 's3/' + bucket; + g.nodes.set(id, {genre: 's3', icon: 'hdd', label: bucket}); + g.edges.set({v: stepId, w: id}, {classNames, label}); + } else { + const id = 'unknown/' + stepId + '/sinks/' + x.name; +
g.nodes.set(id, {genre: 'unknown', icon: 'square', label: x.name}); + g.edges.set({v: stepId, w: id}, {classNames, label}); + } + }); + }); + return g; +}; diff --git a/ui/src/app/pipelines/components/pipeline-details/pipeline.scss b/ui/src/app/pipelines/components/pipeline-details/pipeline.scss new file mode 100644 index 000000000000..42788d4510c8 --- /dev/null +++ b/ui/src/app/pipelines/components/pipeline-details/pipeline.scss @@ -0,0 +1,24 @@ +@import 'node_modules/argo-ui/src/styles/config'; + +.graph.pipeline { + .edge { + path { + stroke-dasharray: 8; + } + } + + .node { + &.Running { + text.icon { + animation: none; + } + } + } + + .edge.flow { + path { + stroke: $argo-running-color; + animation: flowing infinite linear 4s; + } + } +} \ No newline at end of file diff --git a/ui/src/app/pipelines/components/pipeline-details/recent.test.ts b/ui/src/app/pipelines/components/pipeline-details/recent.test.ts new file mode 100644 index 000000000000..880e31a08695 --- /dev/null +++ b/ui/src/app/pipelines/components/pipeline-details/recent.test.ts @@ -0,0 +1,9 @@ +import {recent} from './recent'; + +describe('recent', () => { + test('recency', () => { + expect(recent(null)).toEqual(false); + expect(recent(new Date())).toEqual(true); + expect(recent(new Date(0))).toEqual(false); + }); +}); diff --git a/ui/src/app/pipelines/components/pipeline-details/recent.ts b/ui/src/app/pipelines/components/pipeline-details/recent.ts new file mode 100644 index 000000000000..152000d00dbc --- /dev/null +++ b/ui/src/app/pipelines/components/pipeline-details/recent.ts @@ -0,0 +1,7 @@ +export const recent = (x: Date): boolean => { + if (!x) { + return false; + } + const minutesAgo = (new Date().getTime() - new Date(x).getTime()) / (1000 * 60); + return minutesAgo < 15; +}; diff --git a/ui/src/app/pipelines/components/pipeline-list/pipeline-list.scss b/ui/src/app/pipelines/components/pipeline-list/pipeline-list.scss new file mode 100644 index 000000000000..5551ff460175 --- /dev/null +++ b/ui/src/app/pipelines/components/pipeline-list/pipeline-list.scss @@ -0,0 +1 @@ +@import 'node_modules/argo-ui/src/styles/config'; diff --git a/ui/src/app/pipelines/components/pipeline-list/pipeline-list.tsx b/ui/src/app/pipelines/components/pipeline-list/pipeline-list.tsx new file mode 100644 index 000000000000..bdcbc597186d --- /dev/null +++ b/ui/src/app/pipelines/components/pipeline-list/pipeline-list.tsx @@ -0,0 +1,110 @@ +import {Page} from 'argo-ui'; +import * as React from 'react'; +import {useEffect, useState} from 'react'; +import {Link, RouteComponentProps} from 'react-router-dom'; +import {NodePhase} from '../../../../models'; +import {Pipeline} from '../../../../models/pipeline'; +import {uiUrl} from '../../../shared/base'; +import {ErrorNotice} from '../../../shared/components/error-notice'; +import {Loading} from '../../../shared/components/loading'; +import {NamespaceFilter} from '../../../shared/components/namespace-filter'; +import {PhaseIcon} from '../../../shared/components/phase-icon'; +import {Timestamp} from '../../../shared/components/timestamp'; +import {ZeroState} from '../../../shared/components/zero-state'; +import {historyUrl} from '../../../shared/history'; +import {ListWatch} from '../../../shared/list-watch'; +import {services} from '../../../shared/services'; + +export const PipelineList = ({match, history}: RouteComponentProps) => { + // state for URL and query parameters + const [namespace, setNamespace] = useState(match.params.namespace || ''); + + useEffect( + () => + history.push( + 
historyUrl('pipelines/{namespace}', { + namespace + }) + ), + [namespace] + ); + + // internal state + const [error, setError] = useState(); + const [pipelines, setPipelines] = useState(); + + useEffect(() => { + const lw = new ListWatch( + () => services.pipeline.listPipelines(namespace), + () => services.pipeline.watchPipelines(namespace), + () => setError(null), + () => setError(null), + items => setPipelines([...items]), + setError + ); + lw.start(); + return () => lw.stop(); + }, [namespace]); + + return ( + ] + }}> + + {!pipelines ? ( + + ) : pipelines.length === 0 ? ( + +

Argo Dataflow is a Kubernetes-native platform for executing large parallel data-processing pipelines.

+

+ Each pipeline consists of steps. Each step creates zero or more replicas that load-balance messages from one or more sources (such as cron, HTTP, Kafka, or NATS Streaming), process the data, and then sink it to one or more sinks (such as HTTP, log, Kafka, or NATS Streaming).

+

+ Each step can scale horizontally using HPA, or based on queue length using built-in scaling rules. Steps can be scaled to zero, in which case they periodically scale to one replica for a short time to measure queue length, so that the pipeline can scale back up when needed. A minimal example pipeline is sketched below.

+

+ Learn more +

+
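To make the copy above concrete: a single-step pipeline that copies messages from a cron source to a log sink might look roughly like the literal below. This is an illustration only; the field names mirror what the pipeline-graph.ts code later in this diff reads (spec.cat, sources[].cron.schedule, sinks[].log), and the full Dataflow schema is not part of this change.

// Hypothetical example: a 'cat' step copies each message from its sources to its sinks.
const examplePipeline = {
    metadata: {name: 'example', namespace: 'argo'},
    spec: {
        steps: [
            {
                name: 'main',
                cat: {}, // built-in step type; see the Type union in pipeline-graph.ts
                sources: [{name: 'in', cron: {schedule: '*/3 * * * * *'}}],
                sinks: [{name: 'out', log: {}}]
            }
        ]
    }
};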
+ ) : ( + <> +
+
+
+
NAME
+
NAMESPACE
+
CREATED
+
MESSAGE
+
CONDITIONS
+
+ {pipelines.map(p => ( + +
+ +
+
{p.metadata.name}
+
{p.metadata.namespace}
+
+ +
+
{p.status && p.status.message}
+
{p.status && p.status.conditions && p.status.conditions.map(c => c.type).join(',')}
+ + ))} +
+ + )} + + ); +}; diff --git a/ui/src/app/pipelines/components/pipeline-logs-viewer.tsx b/ui/src/app/pipelines/components/pipeline-logs-viewer.tsx new file mode 100644 index 000000000000..e1549163057a --- /dev/null +++ b/ui/src/app/pipelines/components/pipeline-logs-viewer.tsx @@ -0,0 +1,80 @@ +import * as React from 'react'; +import {useEffect, useState} from 'react'; +import {Observable} from 'rxjs'; +import {ErrorNotice} from '../../shared/components/error-notice'; +import {services} from '../../shared/services'; +import {FullHeightLogsViewer} from '../../workflows/components/workflow-logs-viewer/full-height-logs-viewer'; + +function identity(value: T) { + return () => value; +} + +export const PipelineLogsViewer = ({namespace, pipelineName, stepName}: {namespace: string; pipelineName: string; stepName: string}) => { + const [container, setContainer] = useState('main'); + const [tailLines, setTailLines] = useState(50); + const [error, setError] = useState(); + const [grep, setGrep] = useState(''); + const [logsObservable, setLogsObservable] = useState>(); + const [logLoaded, setLogLoaded] = useState(false); + // filter allows us to introduce a short delay, before we actually change grep + const [filter, setFilter] = useState(''); + useEffect(() => { + const x = setTimeout(() => setGrep(filter), 1000); + return () => clearTimeout(x); + }, [filter]); + + useEffect(() => { + setError(null); + setLogLoaded(false); + const source = services.pipeline + .pipelineLogs(namespace, pipelineName, stepName, container, grep, tailLines) + .filter(e => !!e) + .map(e => e.msg + '\n') + // this next line highlights the search term in bold with a yellow background, white text + .map(x => x.replace(new RegExp(grep, 'g'), y => '\u001b[1m\u001b[43;1m\u001b[37m' + y + '\u001b[0m')) + .publishReplay() + .refCount(); + const subscription = source.subscribe(() => setLogLoaded(true), setError); + setLogsObservable(source); + return () => subscription.unsubscribe(); + }, [namespace, pipelineName, stepName, container, grep, tailLines]); + + return ( +
+
+ {['init', 'main', 'sidecar'].map(x => ( + setContainer(x)} key={x} style={{margin: 10}}> + {x === container ? ( + + {' '} + + {x} + + ) : ( +   {x} + )} + + ))} + + setFilter(v.target.value)} placeholder='Filter (regexp)...' /> + +
+ + {!logLoaded ? ( +
+ Waiting for data... +
+ ) : ( + false}} /> + )} +
+ +
+
+ ); +}; diff --git a/ui/src/app/pipelines/components/step-side-panel.tsx b/ui/src/app/pipelines/components/step-side-panel.tsx new file mode 100644 index 000000000000..9ba3d5c03af9 --- /dev/null +++ b/ui/src/app/pipelines/components/step-side-panel.tsx @@ -0,0 +1,215 @@ +import {SlidingPanel, Tabs} from 'argo-ui'; +import * as React from 'react'; +import {Step, StepStatus} from '../../../models/step'; +import {ObjectEditor} from '../../shared/components/object-editor/object-editor'; +import {Phase} from '../../shared/components/phase'; +import {TickMeter} from '../../shared/components/tick-meter'; +import {Timestamp} from '../../shared/components/timestamp'; +import {parseResourceQuantity} from '../../shared/resource-quantity'; +import {EventsPanel} from '../../workflows/components/events-panel'; +import {PipelineLogsViewer} from './pipeline-logs-viewer'; +import {totalRate} from './total-rate'; + +const prettyNumber = (x: number): number => (x < 1 ? x : Math.round(x)); + +export const StepSidePanel = ({ + isShown, + namespace, + pipelineName, + step, + setTab, + tab, + onClose +}: { + isShown: boolean; + namespace: string; + pipelineName: string; + step: Step; + tab: string; + setTab: (tab: string) => void; + onClose: () => void; +}) => { + const stepName = step.spec.name; + return ( + + <> +

+ {pipelineName}/{stepName} +

+ + }, + { + title: 'EVENTS', + key: 'events', + content: + }, + { + title: 'MANIFEST', + key: 'manifest', + content: + } + ]} + /> + +
+ ); +}; + +const statusPanel = (step: Step) => + step.status && ( + <> +
+
{statusHeader(step.status)}
+
+
+
{sourcesPanel(step.status)}
+
{sinksPanel(step.status)}
+
+ + ); + +const statusHeader = (status: StepStatus) => ( +
+
+
+
Phase
+
+ {status.message} +
+
+
+
Replicas
+
{status.replicas}
+ {status.lastScaledAt && ( + <> +
Last scaled
+
+ +
+ + )} +
+
+
+); + +const sourcesPanel = (status: StepStatus) => ( + <> +
Sources
+ {status.sourceStatuses ? ( + Object.entries(status.sourceStatuses).map(([name, x]) => { + const total = Object.values(x.metrics || {}) + .filter(m => m.total) + .reduce((a, b) => a + b.total, 0); + const rate = totalRate(x.metrics, status.replicas); + const errors = Object.values(x.metrics || {}) + .filter(m => m.errors) + .reduce((a, b) => a + b.errors, 0); + const retries = Object.values(x.metrics || {}) + .filter(m => m.retries) + .reduce((a, b) => a + b.retries, 0); + return ( +
+

{name}

+
+
+
Pending
+
+ +
+
+
+
Retries
+
+ +
+
+
+
Total
+
+ +
+
+ TPS +
+
+
+
Errors
+
+ +
+
+ % +
+
+
+
+ ); + }) + ) : ( +
None
+ )} + +); + +const sinksPanel = (status: StepStatus) => ( + <> +
+
+const sinksPanel = (status: StepStatus) => (
+    <>
+        <h3>Sinks</h3>
+        {status.sinkStatuses ? (
+            Object.entries(status.sinkStatuses).map(([name, x]) => {
+                const total = Object.values(x.metrics || {})
+                    .filter(m => m.total)
+                    .reduce((a, b) => a + b.total, 0);
+                const rate = Object.entries(x.metrics || {})
+                    // the rate will remain after scale-down, so we must filter out, as it'll be wrong
+                    .filter(([replica]) => parseInt(replica, 10) < status.replicas)
+                    .map(([, m]) => m)
+                    .map(m => parseResourceQuantity(m.rate))
+                    .reduce((a, b) => a + b, 0);
+                const errors = Object.values(x.metrics || {})
+                    .filter(m => m.errors)
+                    .reduce((a, b) => a + b.errors, 0);
+                return (
+                    <div className='white-box' key={name}>
+                        <h5>
+                            {name}
+                        </h5>
+                        <div className='row'>
+                            <div className='columns small-6'>
+                                <div>Total</div>
+                                <div>
+                                    <TickMeter value={total}/>
+                                </div>
+                                <div>
+                                    <TickMeter value={prettyNumber(rate)}/> TPS
+                                </div>
+                            </div>
+                            <div className='columns small-6'>
+                                <div>Errors</div>
+                                <div>
+                                    <TickMeter value={errors}/>
+                                </div>
+                                <div>
+                                    <TickMeter value={prettyNumber((100 * errors) / total)}/> %
+                                </div>
+                            </div>
+                        </div>
+                    </div>
+                );
+            })
+        ) : (
+            <div>None</div>
+        )}
+    </>
+);
diff --git a/ui/src/app/pipelines/components/total-rate.ts b/ui/src/app/pipelines/components/total-rate.ts
new file mode 100644
index 000000000000..1aeb0267a869
--- /dev/null
+++ b/ui/src/app/pipelines/components/total-rate.ts
@@ -0,0 +1,17 @@
+import {Metrics} from '../../../models/step';
+import {parseResourceQuantity} from '../../shared/resource-quantity';
+
+const prettyNumber = (x: number): number => (x < 1 ? x : Math.round(x));
+export const totalRate = (metrics: Metrics, replicas: number): number => {
+    const rates = Object.entries(metrics || {})
+        // the rate will remain after scale-down, so we must filter out, as it'll be wrong
+        .filter(([replica, m]) => parseInt(replica, 10) < replicas);
+    return rates.length > 0
+        ? prettyNumber(
+              rates
+                  .map(([, m]) => m)
+                  .map(m => parseResourceQuantity(m.rate))
+                  .reduce((a, b) => a + b, 0)
+          )
+        : null;
+};
diff --git a/ui/src/app/pipelines/index.ts b/ui/src/app/pipelines/index.ts
new file mode 100644
index 000000000000..d56ea074ce20
--- /dev/null
+++ b/ui/src/app/pipelines/index.ts
@@ -0,0 +1,5 @@
+import {PipelineContainer} from './components/pipeline-container';
+
+export default {
+    component: PipelineContainer
+};
diff --git a/ui/src/app/shared/components/icon.ts b/ui/src/app/shared/components/icon.ts
index 5981f10ac112..d36be2ca9174 100644
--- a/ui/src/app/shared/components/icon.ts
+++ b/ui/src/app/shared/components/icon.ts
@@ -1,15 +1,22 @@
 export type Icon =
+    | 'arrows-alt-h'
     | 'arrow-left'
     | 'arrow-right'
     | 'bars'
+    | 'bed'
     | 'bell'
     | 'box'
     | 'calendar'
     | 'cloud'
+    | 'compress'
+    | 'cube'
+    | 'chevron-circle-right'
+    | 'chevron-right'
     | 'circle'
     | 'circle-notch'
     | 'clock'
     | 'code'
+    | 'cog'
     | 'comment'
     | 'caret-left'
     | 'caret-right'
@@ -18,21 +25,27 @@ export type Icon =
     | 'check'
     | 'database'
     | 'envelope'
+    | 'expand'
     | 'edit'
     | 'ellipsis-h'
+    | 'exchange-alt'
     | 'external-link-alt'
     | 'file'
     | 'file-alt'
     | 'file-code'
     | 'filter'
     | 'forward'
+    | 'git-alt'
     | 'grip-vertical'
     | 'hdd'
+    | 'inbox'
     | 'info-circle'
     | 'keyboard'
     | 'link'
     | 'list'
     | 'microchip'
+    | 'object-group'
+    | 'object-ungroup'
     | 'play'
     | 'play-circle'
     | 'plus-circle'
diff --git a/ui/src/app/shared/components/icons.ts b/ui/src/app/shared/components/icons.ts
index 099b8bb7761d..6a73226cc3ae 100644
--- a/ui/src/app/shared/components/icons.ts
+++ b/ui/src/app/shared/components/icons.ts
@@ -1,13 +1,20 @@
 export const icons: {[key: string]: string} = {
+    'arrows-alt-h': '\uf337',
     'arrow-right': '\uf061',
+    'bed': '\uf236',
     'bell': '\uf0f3',
     'box': '\uf466',
     'calendar': '\uf133',
     'cloud': '\uf0c2',
+    'chevron-circle-right': '\uf138',
+    'chevron-right': '\uf054',
     'circle': '\uf111',
     'circle-notch': '\uf1ce',
     'clock': '\uf017',
     'code': '\uf121',
+    'compress': '\uf066',
+    'cube': '\uf1b2',
+    'cog': '\uf013',
     'comment': '\uf075',
     'code-branch': '\uf126',
     'credit-card': '\uf09d',
@@ -15,6 +22,8 @@ export const icons: {[key: string]: string} = {
     'database': '\uf1c0',
     'envelope': '\uf0e0',
     'ellipsis-h': '\uf141',
+    'exchange-alt': '\uf362',
+    'expand': '\uf065',
     'file': '\uf15b',
     'file-alt': '\uf15c',
     'file-code': '\uf1c9',
@@ -22,10 +31,12 @@ export const icons: {[key: string]: string} = {
     'forward': '\uf04e',
     'grip-vertical': '\uf58e',
     'hdd': '\uf0a0',
+    'inbox': '\uf01c',
     'keyboard': '\uf11c',
     'link': '\uf0c1',
     'list': '\uf03a',
     'microchip': '\uf2db',
+    'object-group': '\uf247',
     'play': '\uf04b',
     'play-circle': '\uf144',
     'puzzle-piece': '\uf12e',
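The replica filter in `totalRate` (and inline in `sinksPanel` above) exists because a replica's last-reported rate lingers in the metrics map after scale-down; only entries whose replica index is below the current replica count are live. A self-contained illustration — the metrics shape and the `parseQuantity` stand-in are simplifications, not the real imports:

```ts
type Rates = {[replica: string]: {rate?: string}};

// Stand-in for the real parseResourceQuantity, which understands
// Kubernetes quantities such as '500m' (= 0.5).
const parseQuantity = (q?: string): number => parseFloat(q || '0');

const liveRate = (metrics: Rates, replicas: number): number =>
    Object.entries(metrics || {})
        // drop stale entries left behind by replicas that no longer exist
        .filter(([replica]) => parseInt(replica, 10) < replicas)
        .reduce((acc, [, m]) => acc + parseQuantity(m.rate), 0);

// Scaled down from 3 replicas to 2: replica '2' still reports a stale rate of 5.
console.log(liveRate({'0': {rate: '1'}, '1': {rate: '2'}, '2': {rate: '5'}}, 2)); // 3, not 8
```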
diff --git a/ui/src/app/shared/components/phase-icon.tsx b/ui/src/app/shared/components/phase-icon.tsx
index c20e9058a0aa..783af3b1712a 100644
--- a/ui/src/app/shared/components/phase-icon.tsx
+++ b/ui/src/app/shared/components/phase-icon.tsx
@@ -1,8 +1,7 @@
 import * as classNames from 'classnames';
 import * as React from 'react';
-import {NodePhase} from '../../../models';
 import {Utils} from '../utils';
 
-export const PhaseIcon = ({value}: {value: NodePhase}) => {
+export const PhaseIcon = ({value}: {value: string}) => {
     return