diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 884aee826ec82..c3e1ffbc00273 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -123,6 +123,7 @@ receiver/dotnetdiagnosticsreceiver/ @open-telemetry/collector-c receiver/elasticsearchreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @binaryfissiongames receiver/expvarreceiver/ @open-telemetry/collector-contrib-approvers @jamesmoessis @MovieStoreGuy receiver/filelogreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski +receiver/flinkmetricsreceiver/ @open-telemetry/collector-contrib-approvers @jonathanwamsley @djaglowski receiver/fluentforwardreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax receiver/googlecloudpubsubreceiver/ @open-telemetry/collector-contrib-approvers @alexvanboxel receiver/googlecloudspannerreceiver/ @open-telemetry/collector-contrib-approvers @ydrozhdzhal @asukhyy @khospodarysko @architjugran diff --git a/CHANGELOG.md b/CHANGELOG.md index 3cff50b19dfc0..f0659ecfd6c84 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,6 @@ - `jmxreceiver`: Remove properties & groovyscript parameters from JMX Receiver. Add ResourceAttributes & LogLevel parameter to supply some of the removed functionality with reduced attack surface (#9685) - `resourcedetectionprocessor`: 'gke' and 'gce' resource detectors are replaced with a single 'gcp' detector (#10347) - - `pkg/stanza`: Removed reference to deprecated `ClusterName` (#10426) - `couchbasereceiver`: Fully removed unimplemented Couchbase receiver (#10482) @@ -14,6 +13,8 @@ ### 🚀 New components 🚀 + +- `flinkmetricsreceiver`: Add implementation of Flink Metric Receiver (#10121) - `windowseventlogreceiver` Added implementation of Windows Event Log Receiver (#9228) - `vcenterreceiver`: Add metrics receiver for new vcenterreceiver component (#9224) diff --git a/cmd/configschema/go.mod b/cmd/configschema/go.mod index 17888f3e2ef76..bdb58f01d2ccc 100644 --- a/cmd/configschema/go.mod +++ b/cmd/configschema/go.mod @@ -346,6 +346,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dotnetdiagnosticsreceiver v0.52.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/receiver/elasticsearchreceiver v0.52.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.52.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver v0.0.0-00010101000000-000000000000 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.52.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver v0.52.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.52.0 // indirect @@ -761,6 +762,8 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/elast replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver => ../../receiver/filelogreceiver +replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver => ../../receiver/flinkmetricsreceiver + replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver => ../../receiver/fluentforwardreceiver replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudpubsubreceiver => ../../receiver/googlecloudpubsubreceiver diff --git a/go.mod b/go.mod index 5f559bd3a28ed..a4aec073c2e53 100644 --- 
a/go.mod +++ b/go.mod @@ -93,6 +93,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dotnetdiagnosticsreceiver v0.52.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/elasticsearchreceiver v0.52.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.52.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver v0.0.0-00010101000000-000000000000 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.52.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver v0.52.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.52.0 @@ -757,6 +758,8 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/colle replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/couchdbreceiver => ./receiver/couchdbreceiver +replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver => ./receiver/flinkmetricsreceiver + replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver => ./receiver/dockerstatsreceiver replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dotnetdiagnosticsreceiver => ./receiver/dotnetdiagnosticsreceiver diff --git a/internal/components/components.go b/internal/components/components.go index 068255810d4bd..c34c8645b4fa2 100644 --- a/internal/components/components.go +++ b/internal/components/components.go @@ -113,6 +113,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dotnetdiagnosticsreceiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/elasticsearchreceiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver" @@ -204,6 +205,7 @@ func Components() (component.Factories, error) { dotnetdiagnosticsreceiver.NewFactory(), elasticsearchreceiver.NewFactory(), filelogreceiver.NewFactory(), + flinkmetricsreceiver.NewFactory(), fluentforwardreceiver.NewFactory(), googlecloudspannerreceiver.NewFactory(), hostmetricsreceiver.NewFactory(), diff --git a/internal/components/receivers_test.go b/internal/components/receivers_test.go index 342c41ed9592d..e5d2a79a83f02 100644 --- a/internal/components/receivers_test.go +++ b/internal/components/receivers_test.go @@ -118,6 +118,9 @@ func TestDefaultReceivers(t *testing.T) { return cfg }, }, + { + receiver: "flinkmetrics", + }, { receiver: "fluentforward", }, diff --git a/receiver/flinkmetricsreceiver/Makefile b/receiver/flinkmetricsreceiver/Makefile new file mode 100644 index 0000000000000..ded7a36092dc3 --- /dev/null +++ b/receiver/flinkmetricsreceiver/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/receiver/flinkmetricsreceiver/README.md b/receiver/flinkmetricsreceiver/README.md new file mode 100644 index 0000000000000..95420a099c828 --- /dev/null +++ b/receiver/flinkmetricsreceiver/README.md @@ -0,0 +1,43 @@ +# FlinkMetrics Receiver + +| Status | | +| ------------------------ | --------- | +| Stability | [alpha] | +| Supported 
pipeline types | metrics |
+| Distributions | [contrib] |
+
+This receiver uses Flink's [REST API](https://nightlies.apache.org/flink/flink-docs-release-1.14/docs/ops/metrics/#rest-api-integration) to collect Jobmanager, Taskmanager, Job, Task and Operator metrics.
+
+## Prerequisites
+
+This receiver supports Apache Flink versions `1.13.6` and `1.14.4`.
+
+By default, authentication is not required. However, [Flink recommends](https://nightlies.apache.org/flink/flink-docs-master/docs/deployment/security/security-ssl/#external--rest-connectivity) using a “sidecar proxy” that binds the REST endpoint to the loopback interface and starts a REST proxy that authenticates and forwards requests to Flink.
+
+[SSL](https://nightlies.apache.org/flink/flink-docs-master/docs/deployment/security/security-ssl/#external--rest-connectivity) can be enabled for external connectivity with the REST endpoint [options](https://nightlies.apache.org/flink/flink-docs-master/docs/deployment/security/security-ssl/#rest-endpoints-external-connectivity); the certificate can be either CA-signed or self-signed.
+
+## Configuration
+
+The following settings are optional:
+
+- `endpoint` (default: `http://localhost:8081`): The URL of the node to be monitored.
+- `collection_interval` (default = `10s`): This receiver collects metrics on an interval. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.
+- `tls` (defaults defined [here](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)): TLS control. By default insecure settings are rejected and certificate verification is on.
+
+### Example Configuration
+
+```yaml
+receivers:
+  flinkmetrics:
+    endpoint: http://localhost:8081
+    collection_interval: 10s
+```
+
+The full list of settings exposed for this receiver is documented [here](./config.go), with detailed sample configurations [here](./testdata/config.yaml). TLS config is documented further under the [opentelemetry collector's configtls package](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md).
+
+## Metrics
+
+Details about the metrics produced by this receiver can be found in [metadata.yaml](./metadata.yaml).
+
+[alpha]: https://github.com/open-telemetry/opentelemetry-collector-contrib#alpha
+[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib
\ No newline at end of file
diff --git a/receiver/flinkmetricsreceiver/client.go b/receiver/flinkmetricsreceiver/client.go
new file mode 100644
index 0000000000000..0163ab8daddc7
--- /dev/null
+++ b/receiver/flinkmetricsreceiver/client.go
@@ -0,0 +1,364 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
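+
+// client.go implements the receiver's REST client: it walks the endpoints
+// declared below (jobmanager, taskmanagers, jobs, vertices and subtasks) and
+// unmarshals the JSON responses into the internal/models types.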
+
+package flinkmetricsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver"
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"strings"
+
+	"go.opentelemetry.io/collector/component"
+	"go.uber.org/zap"
+
+	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver/internal/models"
+)
+
+// The API endpoints required to collect metrics.
+const (
+	// jobmanagerMetricEndpoint gets jobmanager metrics.
+	jobmanagerMetricEndpoint = "/jobmanager/metrics"
+	// taskmanagersEndpoint gets taskmanager IDs.
+	taskmanagersEndpoint = "/taskmanagers"
+	// taskmanagersMetricEndpoint gets taskmanager metrics using a taskmanager ID.
+	taskmanagersMetricEndpoint = "/taskmanagers/%s/metrics"
+	// jobsEndpoint gets job IDs.
+	jobsEndpoint = "/jobs"
+	// jobsOverviewEndpoint gets job IDs with associated job names.
+	jobsOverviewEndpoint = "/jobs/overview"
+	// jobsWithIDEndpoint gets vertex IDs using a job ID.
+	jobsWithIDEndpoint = "/jobs/%s"
+	// jobsMetricEndpoint gets job metrics using a job ID.
+	jobsMetricEndpoint = "/jobs/%s/metrics"
+	// verticesEndpoint gets subtask indexes using a job and vertex ID.
+	verticesEndpoint = "/jobs/%s/vertices/%s"
+	// subtaskMetricEndpoint gets subtask metrics using a job ID, vertex ID and subtask index.
+	subtaskMetricEndpoint = "/jobs/%s/vertices/%s/subtasks/%v/metrics"
+)
+
+type client interface {
+	GetJobmanagerMetrics(ctx context.Context) (*models.JobmanagerMetrics, error)
+	GetTaskmanagersMetrics(ctx context.Context) ([]*models.TaskmanagerMetrics, error)
+	GetJobsMetrics(ctx context.Context) ([]*models.JobMetrics, error)
+	GetSubtasksMetrics(ctx context.Context) ([]*models.SubtaskMetrics, error)
+}
+
+type flinkClient struct {
+	client       *http.Client
+	hostEndpoint string
+	hostName     string
+	logger       *zap.Logger
+}
+
+func newClient(cfg *Config, host component.Host, settings component.TelemetrySettings, logger *zap.Logger) (client, error) {
+	httpClient, err := cfg.ToClient(host.GetExtensions(), settings)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create HTTP Client: %w", err)
+	}
+
+	hostName, err := getHostname()
+	if err != nil {
+		return nil, err
+	}
+
+	return &flinkClient{
+		client:       httpClient,
+		hostName:     hostName,
+		hostEndpoint: cfg.Endpoint,
+		logger:       logger,
+	}, nil
+}
+
+func (c *flinkClient) get(ctx context.Context, path string) ([]byte, error) {
+	// Construct endpoint and create request
+	url := c.hostEndpoint + path
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create get request for path %s: %w", path, err)
+	}
+
+	// Make request
+	resp, err := c.client.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("failed to make http request: %w", err)
+	}
+
+	// Defer body close
+	defer func() {
+		if closeErr := resp.Body.Close(); closeErr != nil {
+			c.logger.Warn("failed to close response body", zap.Error(closeErr))
+		}
+	}()
+
+	// Check for OK status code
+	if resp.StatusCode != http.StatusOK {
+		c.logger.Debug("flink API non-200", zap.Int("status_code", resp.StatusCode))
+
+		// Attempt to extract the error payload
+		payloadData, err := io.ReadAll(resp.Body)
+		if err != nil {
+			c.logger.Debug("failed to read payload error message", zap.Error(err))
+		} else {
+			c.logger.Debug("flink API Error", zap.ByteString("api_error", payloadData))
+		}
+
+		return nil, fmt.Errorf("non 200 code returned %d", resp.StatusCode)
+	}
+
+	return 
io.ReadAll(resp.Body)
+}
+
+// getMetrics makes a request to a metric endpoint to get the metric names, then a second request with a query built from those names to get the metric values.
+func (c *flinkClient) getMetrics(ctx context.Context, path string) (*models.MetricsResponse, error) {
+	// Get the metric names
+	var metrics *models.MetricsResponse
+	body, err := c.get(ctx, path)
+	if err != nil {
+		c.logger.Debug("failed to retrieve metric names", zap.Error(err))
+		return nil, err
+	}
+
+	// Populates the metric names
+	err = json.Unmarshal(body, &metrics)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshal response body: %w", err)
+	}
+
+	// Construct a get query parameter using a comma-separated list of string values to select specific metrics
+	query := []string{}
+	for _, metricName := range *metrics {
+		query = append(query, metricName.ID)
+	}
+	metricsPath := path + "?get=" + strings.Join(query, ",")
+
+	// Get the metric values using the query
+	body, err = c.get(ctx, metricsPath)
+	if err != nil {
+		c.logger.Debug("failed to retrieve metric values", zap.Error(err))
+		return nil, err
+	}
+
+	// Populates metric values
+	err = json.Unmarshal(body, &metrics)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshal response body: %w", err)
+	}
+
+	return metrics, nil
+}
+
+// GetJobmanagerMetrics gets the jobmanager metrics.
+func (c *flinkClient) GetJobmanagerMetrics(ctx context.Context) (*models.JobmanagerMetrics, error) {
+	// Get the metric names and values for jobmanager
+	metrics, err := c.getMetrics(ctx, jobmanagerMetricEndpoint)
+	if err != nil {
+		return nil, err
+	}
+
+	// Add a hostname used to distinguish between multiple jobmanager instances
+	return &models.JobmanagerMetrics{
+		Host:    c.hostName,
+		Metrics: *metrics,
+	}, nil
+}
+
+// GetTaskmanagersMetrics gets the taskmanager metrics for each taskmanager.
+func (c *flinkClient) GetTaskmanagersMetrics(ctx context.Context) ([]*models.TaskmanagerMetrics, error) {
+	// Get the taskmanager ID list
+	var taskmanagerIDs *models.TaskmanagerIDsResponse
+	body, err := c.get(ctx, taskmanagersEndpoint)
+	if err != nil {
+		c.logger.Debug("failed to retrieve taskmanager IDs", zap.Error(err))
+		return nil, err
+	}
+
+	// Populates the taskmanager IDs
+	err = json.Unmarshal(body, &taskmanagerIDs)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshal response body: %w", err)
+	}
+
+	// Get taskmanager metrics for each taskmanager ID
+	return c.getTaskmanagersMetricsByIDs(ctx, taskmanagerIDs)
+}
+
+// getTaskmanagersMetricsByIDs gets taskmanager metrics for each taskmanager ID.
+func (c *flinkClient) getTaskmanagersMetricsByIDs(ctx context.Context, taskmanagerIDs *models.TaskmanagerIDsResponse) ([]*models.TaskmanagerMetrics, error) {
+	var taskmanagerInstances []*models.TaskmanagerMetrics
+	for _, taskmanager := range taskmanagerIDs.Taskmanagers {
+		query := fmt.Sprintf(taskmanagersMetricEndpoint, taskmanager.ID)
+		metrics, err := c.getMetrics(ctx, query)
+		if err != nil {
+			return nil, err
+		}
+
+		taskmanagerInstance := &models.TaskmanagerMetrics{
+			TaskmanagerID: getTaskmanagerID(taskmanager.ID),
+			Host:          getTaskmanagerHost(taskmanager.ID),
+			Metrics:       *metrics,
+		}
+		taskmanagerInstances = append(taskmanagerInstances, taskmanagerInstance)
+	}
+	return taskmanagerInstances, nil
+}
+
+// GetJobsMetrics gets the job metrics for each job.
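+// It first requests the job overview from /jobs/overview to learn each job's
+// ID and name, then calls getMetrics against the job metrics endpoint for
+// every job, tagging each result with the receiver's hostname.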
+func (c *flinkClient) GetJobsMetrics(ctx context.Context) ([]*models.JobMetrics, error) {
+	// Get the job ID and name list
+	var jobIDs *models.JobOverviewResponse
+	body, err := c.get(ctx, jobsOverviewEndpoint)
+	if err != nil {
+		c.logger.Debug("failed to retrieve job IDs", zap.Error(err))
+		return nil, err
+	}
+
+	// Populates job IDs and names
+	err = json.Unmarshal(body, &jobIDs)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshal response body: %w", err)
+	}
+
+	// Get job metrics for each job ID
+	return c.getJobsMetricsByIDs(ctx, jobIDs)
+}
+
+// getJobsMetricsByIDs gets job metrics for each job ID.
+func (c *flinkClient) getJobsMetricsByIDs(ctx context.Context, jobIDs *models.JobOverviewResponse) ([]*models.JobMetrics, error) {
+	var jobInstances []*models.JobMetrics
+	for _, job := range jobIDs.Jobs {
+		query := fmt.Sprintf(jobsMetricEndpoint, job.Jid)
+		metrics, err := c.getMetrics(ctx, query)
+		if err != nil {
+			return nil, err
+		}
+		jobInstance := models.JobMetrics{
+			Host:    c.hostName,
+			JobName: job.Name,
+			Metrics: *metrics,
+		}
+		jobInstances = append(jobInstances, &jobInstance)
+	}
+	return jobInstances, nil
+}
+
+// GetSubtasksMetrics gets subtask metrics for each job ID, vertex ID and subtask index.
+func (c *flinkClient) GetSubtasksMetrics(ctx context.Context) ([]*models.SubtaskMetrics, error) {
+	// Get the job IDs
+	var jobsResponse *models.JobsResponse
+	body, err := c.get(ctx, jobsEndpoint)
+	if err != nil {
+		c.logger.Debug("failed to retrieve job IDs", zap.Error(err))
+		return nil, err
+	}
+
+	// Populates the job IDs
+	err = json.Unmarshal(body, &jobsResponse)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshal response body: %w", err)
+	}
+	return c.getSubtasksMetricsByIDs(ctx, jobsResponse)
+}
+
+// getSubtasksMetricsByIDs gets subtask metrics for each job ID, vertex ID and subtask index.
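+// It fans out in three steps: /jobs/{jid} for the vertices of each job,
+// /jobs/{jid}/vertices/{vid} for the subtasks of each vertex, and finally one
+// metrics request per subtask index, so the number of REST calls grows with
+// jobs x vertices x subtasks.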
+func (c *flinkClient) getSubtasksMetricsByIDs(ctx context.Context, jobsResponse *models.JobsResponse) ([]*models.SubtaskMetrics, error) { + var subtaskInstances []*models.SubtaskMetrics + // Get vertices for each job + for _, job := range jobsResponse.Jobs { + var jobsWithIDResponse *models.JobsWithIDResponse + query := fmt.Sprintf(jobsWithIDEndpoint, job.ID) + body, err := c.get(ctx, query) + if err != nil { + c.logger.Debug("failed to retrieve job with ID", zap.Error(err)) + return nil, err + } + + // Populates the job response with vertices info + err = json.Unmarshal(body, &jobsWithIDResponse) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal response body: %w", err) + } + // Gets subtask info for each vertex id + for _, vertex := range jobsWithIDResponse.Vertices { + var vertexResponse *models.VerticesResponse + query := fmt.Sprintf(verticesEndpoint, job.ID, vertex.ID) + body, err = c.get(ctx, query) + if err != nil { + c.logger.Debug("failed to retrieve vertex with ID", zap.Error(err)) + return nil, err + } + + // Populates the vertex response with subtask info + err = json.Unmarshal(body, &vertexResponse) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal response body: %w", err) + } + + // Gets subtask metrics for each vertex id + for _, subtask := range vertexResponse.Subtasks { + query := fmt.Sprintf(subtaskMetricEndpoint, job.ID, vertex.ID, subtask.Subtask) + subtaskMetrics, err := c.getMetrics(ctx, query) + if err != nil { + c.logger.Debug("failed to retrieve subtasks metrics", zap.Error(err)) + return nil, err + } + + // Stores subtask info with additional attribute values to uniquely identify metrics + subtaskInstances = append(subtaskInstances, + &models.SubtaskMetrics{ + Host: getTaskmanagerHost(subtask.TaskmanagerID), + TaskmanagerID: getTaskmanagerID(subtask.TaskmanagerID), + JobName: jobsWithIDResponse.Name, + TaskName: vertex.Name, + SubtaskIndex: fmt.Sprintf("%v", subtask.Subtask), + Metrics: *subtaskMetrics, + }) + } + } + } + return subtaskInstances, nil +} + +// Override for testing +var osHostname = os.Hostname + +func getHostname() (string, error) { + host, err := osHostname() + if err != nil { + return "", err + } + return host, nil +} + +// Override for testing +var taskmanagerHost = strings.Split + +func getTaskmanagerHost(id string) string { + host := taskmanagerHost(id, ":") + return host[0] +} + +func reflect(s string) string { + return s +} + +// Override for testing +var taskmanagerID = reflect + +func getTaskmanagerID(id string) string { + return taskmanagerID(id) +} diff --git a/receiver/flinkmetricsreceiver/client_test.go b/receiver/flinkmetricsreceiver/client_test.go new file mode 100644 index 0000000000000..5defefcd8be27 --- /dev/null +++ b/receiver/flinkmetricsreceiver/client_test.go @@ -0,0 +1,594 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
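+
+// The tests in this file stand up httptest servers that replay canned API
+// responses from testdata/apiresponses, covering success, non-200 and
+// malformed-payload paths for every client method.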
+ +// nolint:errcheck +package flinkmetricsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver" + +import ( + "context" + "encoding/json" + "errors" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "regexp" + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configtls" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver/internal/models" +) + +const ( + // filenames for api responses + jobsIDs = "jobs_ids.json" + jobsMetricValues = "jobs_metric_values.json" + jobsWithID = "jobs_with_id.json" + subtaskMetricValues = "subtask_metric_values.json" + vertices = "vertices.json" + jobmanagerMetricValues = "jobmanager_metric_values.json" + jobsOverview = "jobs_overview.json" + taskmanagerIds = "taskmanager_ids.json" + taskmanagerMetricValues = "taskmanager_metric_values.json" + + // regex for endpoint matching + jobsWithIDRegex = "^/jobs/[a-z0-9]+$" + taskmanagerMetricNamesRegex = "^/taskmanagers/[a-z0-9.:-]+/metrics$" + verticesRegex = "^/jobs/[a-z0-9]+/vertices/[a-z0-9]+$" + jobsMetricNamesRegex = "^/jobs/[a-z0-9]+/metrics$" + subtaskMetricNamesRegex = "^/jobs/[a-z0-9]+/vertices/[a-z0-9]+/subtasks/[0-9]+/metrics$" + taskmanagerIDsRegex = "^/taskmanagers$" + apiResponses = "apiresponses" +) + +func TestNewClient(t *testing.T) { + testCase := []struct { + desc string + cfg *Config + host component.Host + settings component.TelemetrySettings + logger *zap.Logger + expectError error + }{ + { + desc: "Invalid HTTP config", + cfg: &Config{ + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: defaultEndpoint, + TLSSetting: configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: "/non/existent", + }, + }, + }, + }, + host: componenttest.NewNopHost(), + settings: componenttest.NewNopTelemetrySettings(), + logger: zap.NewNop(), + expectError: errors.New("failed to create HTTP Client"), + }, + { + desc: "Valid Configuration", + cfg: &Config{ + HTTPClientSettings: confighttp.HTTPClientSettings{ + TLSSetting: configtls.TLSClientSetting{}, + Endpoint: defaultEndpoint, + }, + }, + host: componenttest.NewNopHost(), + settings: componenttest.NewNopTelemetrySettings(), + logger: zap.NewNop(), + expectError: nil, + }, + } + + for _, tc := range testCase { + t.Run(tc.desc, func(t *testing.T) { + ac, err := newClient(tc.cfg, tc.host, tc.settings, tc.logger) + if tc.expectError != nil { + require.Nil(t, ac) + require.Contains(t, err.Error(), tc.expectError.Error()) + } else { + require.NoError(t, err) + + actualClient, ok := ac.(*flinkClient) + require.True(t, ok) + + require.Equal(t, tc.cfg.Endpoint, actualClient.hostEndpoint) + require.Equal(t, tc.logger, actualClient.logger) + require.NotNil(t, actualClient.client) + } + }) + } +} + +func createTestClient(t *testing.T, baseEndpoint string) client { + t.Helper() + cfg := createDefaultConfig().(*Config) + cfg.Endpoint = baseEndpoint + + testClient, err := newClient(cfg, componenttest.NewNopHost(), componenttest.NewNopTelemetrySettings(), zap.NewNop()) + require.NoError(t, err) + return testClient +} + +func TestGetJobmanagerMetrics(t *testing.T) { + testCases := []struct { + desc string + testFunc func(*testing.T) + }{ + { + desc: "Non-200 Response", + testFunc: func(t *testing.T) { + ts := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + })) + defer ts.Close() + + tc := createTestClient(t, ts.URL) + + metrics, err := tc.GetJobmanagerMetrics(context.Background()) + require.Nil(t, metrics) + require.EqualError(t, err, "non 200 code returned 401") + }, + }, + { + desc: "Bad payload returned", + testFunc: func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, err := w.Write([]byte("{")) + require.NoError(t, err) + })) + defer ts.Close() + + tc := createTestClient(t, ts.URL) + + metrics, err := tc.GetJobmanagerMetrics(context.Background()) + require.Nil(t, metrics) + require.Contains(t, err.Error(), "failed to unmarshal response body") + }, + }, + { + desc: "Successful call", + testFunc: func(t *testing.T) { + jobmanagerMetricValuesData := loadAPIResponseData(t, apiResponses, jobmanagerMetricValues) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, err := w.Write(jobmanagerMetricValuesData) + require.NoError(t, err) + })) + defer ts.Close() + + tc := createTestClient(t, ts.URL) + + // Load the valid data into a struct to compare + var expected *models.MetricsResponse + err := json.Unmarshal(jobmanagerMetricValuesData, &expected) + require.NoError(t, err) + + actual, err := tc.GetJobmanagerMetrics(context.Background()) + require.NoError(t, err) + require.Equal(t, expected, &actual.Metrics) + + hostname, err := os.Hostname() + require.Nil(t, err) + require.EqualValues(t, hostname, actual.Host) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, tc.testFunc) + } +} + +func TestGetTaskmanagersMetrics(t *testing.T) { + testCases := []struct { + desc string + testFunc func(*testing.T) + }{ + { + desc: "Non-200 Response", + testFunc: func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + })) + defer ts.Close() + + tc := createTestClient(t, ts.URL) + + metrics, err := tc.GetTaskmanagersMetrics(context.Background()) + require.Nil(t, metrics) + require.EqualError(t, err, "non 200 code returned 401") + }, + }, + { + desc: "Bad taskmanagers payload returned", + testFunc: func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, err := w.Write([]byte(`{`)) + require.NoError(t, err) + })) + defer ts.Close() + + tc := createTestClient(t, ts.URL) + + metrics, err := tc.GetTaskmanagersMetrics(context.Background()) + require.Nil(t, metrics) + require.Contains(t, err.Error(), "failed to unmarshal response body:") + }, + }, + { + desc: "Bad taskmanagers metrics payload returned", + testFunc: func(t *testing.T) { + taskmanagerIDs := loadAPIResponseData(t, apiResponses, taskmanagerIds) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if match, _ := regexp.MatchString(taskmanagerIDsRegex, r.URL.Path); match { + _, err := w.Write(taskmanagerIDs) + require.NoError(t, err) + return + } + + _, err := w.Write([]byte("{")) + require.NoError(t, err) + })) + defer ts.Close() + + tc := createTestClient(t, ts.URL) + + metrics, err := tc.GetTaskmanagersMetrics(context.Background()) + require.Nil(t, metrics) + require.Contains(t, err.Error(), "failed to unmarshal response body:") + }, + }, + { + desc: "Successful call", + testFunc: func(t *testing.T) { + taskmanagerIDs := loadAPIResponseData(t, apiResponses, 
taskmanagerIds) + taskmanagerMetricValuesData := loadAPIResponseData(t, apiResponses, taskmanagerMetricValues) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if match, _ := regexp.MatchString(taskmanagerIDsRegex, r.URL.Path); match { + _, err := w.Write(taskmanagerIDs) + require.NoError(t, err) + return + } + + if match, _ := regexp.MatchString(taskmanagerMetricNamesRegex, r.URL.Path); match { + _, err := w.Write(taskmanagerMetricValuesData) + require.NoError(t, err) + return + } + })) + defer ts.Close() + + tc := createTestClient(t, ts.URL) + + // Load the valid data into a struct to compare + var expected *models.MetricsResponse + err := json.Unmarshal(taskmanagerMetricValuesData, &expected) + require.NoError(t, err) + + actual, err := tc.GetTaskmanagersMetrics(context.Background()) + require.NoError(t, err) + require.Len(t, actual, 1) + require.Equal(t, expected, &actual[0].Metrics) + require.EqualValues(t, "172.26.0.3", actual[0].Host) + require.EqualValues(t, "172.26.0.3:34457-7b2520", actual[0].TaskmanagerID) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, tc.testFunc) + } +} + +func TestGetJobsMetrics(t *testing.T) { + testCases := []struct { + desc string + testFunc func(*testing.T) + }{ + { + desc: "Non-200 Response", + testFunc: func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + })) + defer ts.Close() + + tc := createTestClient(t, ts.URL) + + metrics, err := tc.GetJobsMetrics(context.Background()) + require.Nil(t, metrics) + require.EqualError(t, err, "non 200 code returned 401") + }, + }, + { + desc: "Bad payload returned", + testFunc: func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, err := w.Write([]byte(`{`)) + require.NoError(t, err) + })) + defer ts.Close() + + tc := createTestClient(t, ts.URL) + + metrics, err := tc.GetJobsMetrics(context.Background()) + require.Nil(t, metrics) + require.Contains(t, err.Error(), "failed to unmarshal response body") + }, + }, + { + desc: "bad payload returned call", + testFunc: func(t *testing.T) { + jobsOverviewData := loadAPIResponseData(t, apiResponses, jobsOverview) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == jobsOverviewEndpoint { + _, err := w.Write(jobsOverviewData) + require.NoError(t, err) + return + } + _, err := w.Write([]byte(`{`)) + require.NoError(t, err) + })) + defer ts.Close() + + tc := createTestClient(t, ts.URL) + + metrics, err := tc.GetJobsMetrics(context.Background()) + require.Nil(t, metrics) + require.Contains(t, err.Error(), "failed to unmarshal response body") + }, + }, + { + desc: "Successful call", + testFunc: func(t *testing.T) { + jobsOverviewData := loadAPIResponseData(t, apiResponses, jobsOverview) + jobsMetricValuesData := loadAPIResponseData(t, apiResponses, jobsMetricValues) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == jobsOverviewEndpoint { + _, err := w.Write(jobsOverviewData) + require.NoError(t, err) + return + } + if match, _ := regexp.MatchString(jobsMetricNamesRegex, r.URL.Path); match { + _, err := w.Write(jobsMetricValuesData) + require.NoError(t, err) + return + } + })) + defer ts.Close() + + tc := createTestClient(t, ts.URL) + + // Load the valid data into a struct to compare + var expected *models.MetricsResponse + err := 
json.Unmarshal(jobsMetricValuesData, &expected) + require.NoError(t, err) + + actual, err := tc.GetJobsMetrics(context.Background()) + require.NoError(t, err) + require.Len(t, actual, 1) + require.Equal(t, expected, &actual[0].Metrics) + require.EqualValues(t, "State machine job", actual[0].JobName) + + hostname, err := os.Hostname() + require.Nil(t, err) + require.EqualValues(t, hostname, actual[0].Host) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, tc.testFunc) + } +} + +func TestGetSubtasksMetrics(t *testing.T) { + testCases := []struct { + desc string + testFunc func(*testing.T) + }{ + { + desc: "Non-200 Response", + testFunc: func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + })) + defer ts.Close() + + tc := createTestClient(t, ts.URL) + + metrics, err := tc.GetSubtasksMetrics(context.Background()) + require.Nil(t, metrics) + require.EqualError(t, err, "non 200 code returned 401") + }, + }, + { + desc: "Bad payload returned", + testFunc: func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, err := w.Write([]byte("{")) + require.NoError(t, err) + })) + defer ts.Close() + + tc := createTestClient(t, ts.URL) + + metrics, err := tc.GetSubtasksMetrics(context.Background()) + require.Nil(t, metrics) + require.Contains(t, err.Error(), "failed to unmarshal response body") + }, + }, + { + desc: "Bad payload jobs IDs returned", + testFunc: func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + jobsData := loadAPIResponseData(t, apiResponses, jobsIDs) + if r.URL.Path == jobsEndpoint { + _, err := w.Write(jobsData) + require.NoError(t, err) + return + } + _, err := w.Write([]byte("{")) + require.NoError(t, err) + })) + defer ts.Close() + + tc := createTestClient(t, ts.URL) + + metrics, err := tc.GetSubtasksMetrics(context.Background()) + require.Nil(t, metrics) + require.Contains(t, err.Error(), "failed to unmarshal response body") + }, + }, + { + desc: "Bad payload vertices IDs returned", + testFunc: func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + jobsData := loadAPIResponseData(t, apiResponses, jobsIDs) + jobsWithIDData := loadAPIResponseData(t, apiResponses, jobsWithID) + if r.URL.Path == jobsEndpoint { + _, err := w.Write(jobsData) + require.NoError(t, err) + return + } + if match, _ := regexp.MatchString(jobsWithIDRegex, r.URL.Path); match { + _, err := w.Write(jobsWithIDData) + require.NoError(t, err) + return + } + _, err := w.Write([]byte("{")) + require.NoError(t, err) + })) + defer ts.Close() + + tc := createTestClient(t, ts.URL) + + metrics, err := tc.GetSubtasksMetrics(context.Background()) + require.Nil(t, metrics) + require.Contains(t, err.Error(), "failed to unmarshal response body") + }, + }, + { + desc: "Bad payload subtask metrics returned", + testFunc: func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + jobsData := loadAPIResponseData(t, apiResponses, jobsIDs) + jobsWithIDData := loadAPIResponseData(t, apiResponses, jobsWithID) + verticesData := loadAPIResponseData(t, apiResponses, vertices) + if r.URL.Path == jobsEndpoint { + _, err := w.Write(jobsData) + require.NoError(t, err) + return + } + if match, _ := regexp.MatchString(jobsWithIDRegex, r.URL.Path); match { + _, err := w.Write(jobsWithIDData) + 
require.NoError(t, err) + return + } + if match, _ := regexp.MatchString(verticesRegex, r.URL.Path); match { + _, err := w.Write(verticesData) + require.NoError(t, err) + return + } + _, err := w.Write([]byte("{")) + require.NoError(t, err) + })) + defer ts.Close() + + tc := createTestClient(t, ts.URL) + + metrics, err := tc.GetSubtasksMetrics(context.Background()) + require.Nil(t, metrics) + require.Contains(t, err.Error(), "failed to unmarshal response body") + }, + }, + { + desc: "Successful call", + testFunc: func(t *testing.T) { + jobsData := loadAPIResponseData(t, apiResponses, jobsIDs) + jobsWithIDData := loadAPIResponseData(t, apiResponses, jobsWithID) + verticesData := loadAPIResponseData(t, apiResponses, vertices) + subtaskMetricValuesData := loadAPIResponseData(t, apiResponses, subtaskMetricValues) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == jobsEndpoint { + _, err := w.Write(jobsData) + require.NoError(t, err) + return + } + if match, _ := regexp.MatchString(jobsWithIDRegex, r.URL.Path); match { + _, err := w.Write(jobsWithIDData) + require.NoError(t, err) + return + } + if match, _ := regexp.MatchString(verticesRegex, r.URL.Path); match { + _, err := w.Write(verticesData) + require.NoError(t, err) + return + } + if match, _ := regexp.MatchString(subtaskMetricNamesRegex, r.URL.Path); match { + _, err := w.Write(subtaskMetricValuesData) + require.NoError(t, err) + return + } + })) + defer ts.Close() + + tc := createTestClient(t, ts.URL) + + var e *models.JobsResponse + _ = json.Unmarshal(jobsData, &e) + require.EqualValues(t, e.Jobs[0].ID, "54a5c6e527e00e1bb861272a39fe13e4") + + // Load the valid data into a struct to compare + var expected *models.MetricsResponse + err := json.Unmarshal(subtaskMetricValuesData, &expected) + require.NoError(t, err) + + actual, err := tc.GetSubtasksMetrics(context.Background()) + require.NoError(t, err) + require.Len(t, actual, 2) + require.Equal(t, expected, &actual[0].Metrics) + require.EqualValues(t, "State machine job", actual[0].JobName) + require.EqualValues(t, "172.26.0.3", actual[0].Host) + // require.EqualValues(t, "flink-worker", actual[0].Host) + require.EqualValues(t, "172.26.0.3:34457-7b2520", actual[0].TaskmanagerID) + require.EqualValues(t, "Source: Custom Source", actual[0].TaskName) + require.EqualValues(t, "0", actual[0].SubtaskIndex) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, tc.testFunc) + } +} + +func loadAPIResponseData(t *testing.T, folder, fileName string) []byte { + t.Helper() + fullPath := filepath.Join("testdata", folder, fileName) + + data, err := ioutil.ReadFile(fullPath) + require.NoError(t, err) + + return data +} diff --git a/receiver/flinkmetricsreceiver/config.go b/receiver/flinkmetricsreceiver/config.go new file mode 100644 index 0000000000000..f6bf2dcc8d5ab --- /dev/null +++ b/receiver/flinkmetricsreceiver/config.go @@ -0,0 +1,43 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flinkmetricsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver"
+
+import (
+	"fmt"
+	"net/url"
+
+	"go.opentelemetry.io/collector/config/confighttp"
+	"go.opentelemetry.io/collector/receiver/scraperhelper"
+
+	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver/internal/metadata"
+)
+
+const defaultEndpoint = "http://localhost:8081"
+
+// Config defines the configuration for the various elements of the receiver agent.
+type Config struct {
+	scraperhelper.ScraperControllerSettings `mapstructure:",squash"`
+	confighttp.HTTPClientSettings           `mapstructure:",squash"`
+	Metrics                                 metadata.MetricsSettings `mapstructure:"metrics"`
+}
+
+// Validate validates the configuration by checking for missing or invalid fields
+func (cfg *Config) Validate() error {
+	if _, err := url.Parse(cfg.Endpoint); err != nil {
+		return fmt.Errorf("\"endpoint\" must be in the form of <scheme>://<hostname>:<port>: %w", err)
+	}
+
+	return nil
+}
diff --git a/receiver/flinkmetricsreceiver/config_test.go b/receiver/flinkmetricsreceiver/config_test.go
new file mode 100644
index 0000000000000..273211ba3a546
--- /dev/null
+++ b/receiver/flinkmetricsreceiver/config_test.go
@@ -0,0 +1,63 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flinkmetricsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver"
+
+import (
+	"errors"
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"go.opentelemetry.io/collector/config/confighttp"
+)
+
+func TestValidate(t *testing.T) {
+	testCases := []struct {
+		desc        string
+		cfg         *Config
+		expectedErr error
+	}{
+		{
+			desc: "invalid endpoint",
+			cfg: &Config{
+				HTTPClientSettings: confighttp.HTTPClientSettings{
+					Endpoint: "invalid://endpoint: 12efg",
+				},
+			},
+			expectedErr: fmt.Errorf("\"endpoint\" must be in the form of <scheme>://<hostname>:<port>: %w", errors.New(`parse "invalid://endpoint: 12efg": invalid port ": 12efg" after host`)),
+		},
+		{
+			desc: "valid config",
+			cfg: &Config{
+				HTTPClientSettings: confighttp.HTTPClientSettings{
+					Endpoint: defaultEndpoint,
+				},
+			},
+			expectedErr: nil,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.desc, func(t *testing.T) {
+			actualErr := tc.cfg.Validate()
+			if tc.expectedErr != nil {
+				require.EqualError(t, actualErr, tc.expectedErr.Error())
+			} else {
+				require.NoError(t, actualErr)
+			}
+		})
+	}
+}
diff --git a/receiver/flinkmetricsreceiver/doc.go b/receiver/flinkmetricsreceiver/doc.go
new file mode 100644
index 0000000000000..c332d90221b08
--- /dev/null
+++ b/receiver/flinkmetricsreceiver/doc.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mdatagen --experimental-gen metadata.yaml + +package flinkmetricsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver" diff --git a/receiver/flinkmetricsreceiver/documentation.md b/receiver/flinkmetricsreceiver/documentation.md new file mode 100644 index 0000000000000..030842a4f25fb --- /dev/null +++ b/receiver/flinkmetricsreceiver/documentation.md @@ -0,0 +1,68 @@ +[comment]: <> (Code generated by mdatagen. DO NOT EDIT.) + +# flinkmetricsreceiver + +## Metrics + +These are the metrics available for this scraper. + +| Name | Description | Unit | Type | Attributes | +| ---- | ----------- | ---- | ---- | ---------- | +| **flink.job.checkpoint.count** | The number of checkpoints completed or failed. | {checkpoints} | Sum(Int) |
<ul> <li>checkpoint</li> </ul> |
+| **flink.job.checkpoint.in_progress** | The number of checkpoints in progress. | {checkpoints} | Sum(Int) | <ul> </ul> |
+| **flink.job.last_checkpoint.size** | The total size of the last checkpoint. | By | Sum(Int) | <ul> </ul> |
+| **flink.job.last_checkpoint.time** | The end to end duration of the last checkpoint. | ms | Gauge(Int) | <ul> </ul> |
+| **flink.job.restart.count** | The total number of restarts since this job was submitted, including full restarts and fine-grained restarts. | {restarts} | Sum(Int) | <ul> </ul> |
+| **flink.jvm.class_loader.classes_loaded** | The total number of classes loaded since the start of the JVM. | {classes} | Sum(Int) | <ul> </ul> |
+| **flink.jvm.cpu.load** | The CPU usage of the JVM for a jobmanager or taskmanager. | % | Gauge(Double) | <ul> </ul> |
+| **flink.jvm.cpu.time** | The CPU time used by the JVM for a jobmanager or taskmanager. | ns | Sum(Int) | <ul> </ul> |
+| **flink.jvm.gc.collections.count** | The total number of collections that have occurred. | {collections} | Sum(Int) | <ul> <li>garbage_collector_name</li> </ul> |
+| **flink.jvm.gc.collections.time** | The total time spent performing garbage collection. | ms | Sum(Int) | <ul> <li>garbage_collector_name</li> </ul> |
+| **flink.jvm.memory.direct.total_capacity** | The total capacity of all buffers in the direct buffer pool. | By | Sum(Int) | <ul> </ul> |
+| **flink.jvm.memory.direct.used** | The amount of memory used by the JVM for the direct buffer pool. | By | Sum(Int) | <ul> </ul> |
+| **flink.jvm.memory.heap.committed** | The amount of heap memory guaranteed to be available to the JVM. | By | Sum(Int) | <ul> </ul> |
+| **flink.jvm.memory.heap.max** | The maximum amount of heap memory that can be used for memory management. | By | Sum(Int) | <ul> </ul> |
+| **flink.jvm.memory.heap.used** | The amount of heap memory currently used. | By | Sum(Int) | <ul> </ul> |
+| **flink.jvm.memory.mapped.total_capacity** | The number of buffers in the mapped buffer pool. | By | Sum(Int) | <ul> </ul> |
+| **flink.jvm.memory.mapped.used** | The amount of memory used by the JVM for the mapped buffer pool. | By | Sum(Int) | <ul> </ul> |
+| **flink.jvm.memory.metaspace.committed** | The amount of memory guaranteed to be available to the JVM in the Metaspace memory pool. | By | Sum(Int) | <ul> </ul> |
+| **flink.jvm.memory.metaspace.max** | The maximum amount of memory that can be used in the Metaspace memory pool. | By | Sum(Int) | <ul> </ul> |
+| **flink.jvm.memory.metaspace.used** | The amount of memory currently used in the Metaspace memory pool. | By | Sum(Int) | <ul> </ul> |
+| **flink.jvm.memory.nonheap.committed** | The amount of non-heap memory guaranteed to be available to the JVM. | By | Sum(Int) | <ul> </ul> |
+| **flink.jvm.memory.nonheap.max** | The maximum amount of non-heap memory that can be used for memory management. | By | Sum(Int) | <ul> </ul> |
+| **flink.jvm.memory.nonheap.used** | The amount of non-heap memory currently used. | By | Sum(Int) | <ul> </ul> |
+| **flink.jvm.threads.count** | The total number of live threads. | {threads} | Sum(Int) | <ul> </ul> |
+| **flink.memory.managed.total** | The total amount of managed memory. | By | Sum(Int) | <ul> </ul> |
+| **flink.memory.managed.used** | The amount of managed memory currently used. | By | Sum(Int) | <ul> </ul> |
+| **flink.operator.record.count** | The number of records an operator has. | {records} | Sum(Int) | <ul> <li>operator_name</li> <li>record</li> </ul> |
+| **flink.operator.watermark.output** | The last watermark this operator has emitted. | ms | Sum(Int) | <ul> <li>operator_name</li> </ul> |
+| **flink.task.record.count** | The number of records a task has. | {records} | Sum(Int) | <ul> <li>record</li> </ul> |
+
+**Highlighted metrics** are emitted by default. Other metrics are optional and not emitted by default.
+Any metric can be enabled or disabled with the following scraper configuration:
+
+```yaml
+metrics:
+  <metric_name>:
+    enabled: <true|false>
+```
+
+## Resource attributes
+
+| Name | Description | Type |
+| ---- | ----------- | ---- |
+| flink.job.name | The job name. | String |
+| flink.resource.type | The flink scope type to which a metric belongs. | String |
+| flink.subtask.index | The subtask index. | String |
+| flink.task.name | The task name. | String |
+| flink.taskmanager.id | The taskmanager ID. | String |
+| host.name | The host name. | String |
+
+## Metric attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| checkpoint | The number of checkpoints completed or that failed. | completed, failed |
+| garbage_collector_name | The names for the parallel scavenge and garbage first garbage collectors. | PS_MarkSweep, PS_Scavenge, G1_Young_Generation, G1_Old_Generation |
+| operator_name | The operator name. | |
+| record | The number of records received in, sent out or dropped due to arriving late. | in, out, dropped |
diff --git a/receiver/flinkmetricsreceiver/factory.go b/receiver/flinkmetricsreceiver/factory.go
new file mode 100644
index 0000000000000..ed9a723a2d4fa
--- /dev/null
+++ b/receiver/flinkmetricsreceiver/factory.go
@@ -0,0 +1,78 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
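+
+// factory.go registers the "flinkmetrics" receiver type. The default config
+// targets http://localhost:8081 with a 10s collection interval and timeout,
+// and createMetricsReceiver wraps the scraper in a scraperhelper controller.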
+ +package flinkmetricsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver" + +import ( + "context" + "errors" + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/receiver/scraperhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver/internal/metadata" +) + +const typeStr = "flinkmetrics" + +var errConfigNotflinkmetrics = errors.New("config was not a flinkmetrics receiver config") + +// NewFactory creates a new receiver factory +func NewFactory() component.ReceiverFactory { + return component.NewReceiverFactory( + typeStr, + createDefaultConfig, + component.WithMetricsReceiver(createMetricsReceiver), + ) +} + +func createDefaultConfig() config.Receiver { + return &Config{ + ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ + ReceiverSettings: config.NewReceiverSettings(config.NewComponentID(typeStr)), + CollectionInterval: 10 * time.Second, + }, + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: defaultEndpoint, + Timeout: 10 * time.Second, + }, + Metrics: metadata.DefaultMetricsSettings(), + } +} + +func createMetricsReceiver( + _ context.Context, + params component.ReceiverCreateSettings, + rConf config.Receiver, + consumer consumer.Metrics, +) (component.MetricsReceiver, error) { + cfg, ok := rConf.(*Config) + if !ok { + return nil, errConfigNotflinkmetrics + } + ns := newflinkScraper(cfg, params) + scraper, err := scraperhelper.NewScraper(typeStr, ns.scrape, scraperhelper.WithStart(ns.start)) + if err != nil { + return nil, err + } + + return scraperhelper.NewScraperControllerReceiver( + &cfg.ScraperControllerSettings, params, consumer, + scraperhelper.AddScraper(scraper), + ) +} diff --git a/receiver/flinkmetricsreceiver/factory_test.go b/receiver/flinkmetricsreceiver/factory_test.go new file mode 100644 index 0000000000000..7a3e5c1776828 --- /dev/null +++ b/receiver/flinkmetricsreceiver/factory_test.go @@ -0,0 +1,96 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
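+
+// These tests cover the factory surface: the component type, the default
+// config values, and the config type assertion in createMetricsReceiver.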
+
+package flinkmetricsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver"
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+	"go.opentelemetry.io/collector/component/componenttest"
+	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/config/confighttp"
+	"go.opentelemetry.io/collector/consumer/consumertest"
+	"go.opentelemetry.io/collector/receiver/scraperhelper"
+
+	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver/internal/metadata"
+)
+
+func TestNewFactory(t *testing.T) {
+	testCases := []struct {
+		desc     string
+		testFunc func(*testing.T)
+	}{
+		{
+			desc: "creates a new factory with correct type",
+			testFunc: func(t *testing.T) {
+				factory := NewFactory()
+				require.EqualValues(t, typeStr, factory.Type())
+			},
+		},
+		{
+			desc: "creates a new factory with valid default config",
+			testFunc: func(t *testing.T) {
+				factory := NewFactory()
+
+				var expectedCfg config.Receiver = &Config{
+					ScraperControllerSettings: scraperhelper.ScraperControllerSettings{
+						ReceiverSettings:   config.NewReceiverSettings(config.NewComponentID(typeStr)),
+						CollectionInterval: 10 * time.Second,
+					},
+					HTTPClientSettings: confighttp.HTTPClientSettings{
+						Endpoint: defaultEndpoint,
+						Timeout:  10 * time.Second,
+					},
+					Metrics: metadata.DefaultMetricsSettings(),
+				}
+
+				require.Equal(t, expectedCfg, factory.CreateDefaultConfig())
+			},
+		},
+		{
+			desc: "creates a new factory and CreateMetricsReceiver returns no error",
+			testFunc: func(t *testing.T) {
+				factory := NewFactory()
+				cfg := factory.CreateDefaultConfig()
+				_, err := factory.CreateMetricsReceiver(
+					context.Background(),
+					componenttest.NewNopReceiverCreateSettings(),
+					cfg,
+					consumertest.NewNop(),
+				)
+				require.NoError(t, err)
+			},
+		},
+		{
+			desc: "creates a new factory and CreateMetricsReceiver returns error with incorrect config",
+			testFunc: func(t *testing.T) {
+				factory := NewFactory()
+				_, err := factory.CreateMetricsReceiver(
+					context.Background(),
+					componenttest.NewNopReceiverCreateSettings(),
+					nil,
+					consumertest.NewNop(),
+				)
+				require.ErrorIs(t, err, errConfigNotflinkmetrics)
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.desc, tc.testFunc)
+	}
+}
diff --git a/receiver/flinkmetricsreceiver/go.mod b/receiver/flinkmetricsreceiver/go.mod
new file mode 100644
index 0000000000000..bbd239b199038
--- /dev/null
+++ b/receiver/flinkmetricsreceiver/go.mod
@@ -0,0 +1,73 @@
+module github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver
+
+go 1.17
+
+require (
+	github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest v0.52.0
+	github.com/stretchr/testify v1.7.1
+	github.com/testcontainers/testcontainers-go v0.13.0
+	go.opentelemetry.io/collector v0.52.0
+	go.opentelemetry.io/collector/pdata v0.52.0
+	go.uber.org/zap v1.21.0
+)
+
+require (
+	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
+	github.com/Microsoft/go-winio v0.4.17 // indirect
+	github.com/Microsoft/hcsshim v0.8.23 // indirect
+	github.com/cenkalti/backoff/v4 v4.1.3 // indirect
+	github.com/containerd/cgroups v1.0.1 // indirect
+	github.com/containerd/containerd v1.5.9 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/docker/distribution v2.7.1+incompatible // indirect
+	github.com/docker/docker v20.10.11+incompatible // indirect
+	github.com/docker/go-connections v0.4.0 // indirect
+	github.com/docker/go-units v0.4.0 // 
indirect + github.com/felixge/httpsnoop v1.0.2 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.15.4 // indirect + github.com/knadh/koanf v1.4.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/magiconair/properties v1.8.6 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/sys/mount v0.2.0 // indirect + github.com/moby/sys/mountinfo v0.5.0 // indirect + github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.0.2 // indirect + github.com/opencontainers/runc v1.0.2 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/rogpeppe/go-internal v1.8.1 // indirect + github.com/rs/cors v1.8.2 // indirect + github.com/sirupsen/logrus v1.8.1 // indirect + github.com/stretchr/objx v0.2.0 // indirect + go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0 // indirect + go.opentelemetry.io/otel v1.7.0 // indirect + go.opentelemetry.io/otel/metric v0.30.0 // indirect + go.opentelemetry.io/otel/trace v1.7.0 // indirect + go.uber.org/atomic v1.9.0 // indirect + go.uber.org/multierr v1.8.0 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect + golang.org/x/text v0.3.7 // indirect + google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect + google.golang.org/grpc v1.46.2 // indirect + google.golang.org/protobuf v1.28.0 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect +) + +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest => ../../internal/scrapertest diff --git a/receiver/flinkmetricsreceiver/go.sum b/receiver/flinkmetricsreceiver/go.sum new file mode 100644 index 0000000000000..64ba8e5ce77a6 --- /dev/null +++ b/receiver/flinkmetricsreceiver/go.sum @@ -0,0 +1,1162 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go 
v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod 
h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.23 h1:47MSwtKGXet80aIn+7h4YI6fwPmwIghAnsx2aOUrG2M= +github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= +github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod 
h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= +github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= +github.com/aws/aws-sdk-go-v2/service/appconfig v1.4.2/go.mod h1:FZ3HkCe+b10uFZZkFdvf98LHW21k49W8o8J366lqVKY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= +github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= +github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= +github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= 
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= +github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ= +github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/console 
v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= +github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= +github.com/containerd/containerd v1.5.9 h1:rs6Xg1gtIxaeyG+Smsb/0xaSDu1VgFhOCKBXxMxbsF4= +github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8= +github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod 
h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.1/go.mod 
h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= +github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= +github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= +github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= +github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= 
+github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE= +github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v20.10.11+incompatible h1:OqzI/g/W54LczvhnccGqniFoQghHx3pklbLuhfXpqGo= +github.com/docker/docker v20.10.11+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane 
v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o= +github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= 
+github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= +github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof 
v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.7.2 h1:zoNxOV7WjqXptQOVngLmcSQgXmgk4NMz1HibBchjl/I= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod 
h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q= +github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2/go.mod 
h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.15.4 h1:1kn4/7MepF/CHmYub99/nNX8az0IJjfSOU/jbnTVfqQ= +github.com/klauspost/compress v1.15.4/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson 
v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= +github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI= +github.com/moby/sys/mountinfo v0.5.0/go.mod 
h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE= +github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnuG+zWp9L0Uk= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM= +github.com/pelletier/go-toml v1.8.1/go.mod 
h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs 
v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= +github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero 
v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/testcontainers/testcontainers-go v0.13.0 h1:OUujSlEGsXVo/ykPVZk3KanBNGN0TYb/7oKIPVn15JA= +github.com/testcontainers/testcontainers-go v0.13.0/go.mod h1:z1abufU633Eb/FmSBTzV6ntZAC1eZBYPtaFsn4nPuDk= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= 
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod 
h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/collector v0.52.0 h1:f8860C+jAGtfBIKmF5UFCmKHA3P3ni/KZuVbz3qgvQk= +go.opentelemetry.io/collector v0.52.0/go.mod h1:a9GvaOhyc0nVOUzqvdv5mxyWghCSso/WRO2GgRl4I1g= +go.opentelemetry.io/collector/pdata v0.52.0 h1:B0L9fkqKq5xRKFjICK9i11PRyTR52CCYSpTWaynf1Qc= +go.opentelemetry.io/collector/pdata v0.52.0/go.mod h1:GJUTfTv8mlYpHRjcmHXVbvJr48EW/q/P/HuBvpXAE58= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0 h1:mac9BKRqwaX6zxHPDe3pvmWpwuuIM0vuXv2juCnQevE= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0/go.mod h1:5eCOqeGphOyz6TsY3ZDNjE33SM/TFAK3RGuCL2naTgY= +go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM= +go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= +go.opentelemetry.io/otel/metric v0.30.0 h1:Hs8eQZ8aQgs0U49diZoaS6Uaxw3+bBE3lcMUKBFIk3c= +go.opentelemetry.io/otel/metric v0.30.0/go.mod h1:/ShZ7+TS4dHzDFmfi1kSXMhMVubNoP0oIaBp70J6UXU= +go.opentelemetry.io/otel/sdk v1.7.0 h1:4OmStpcKVOfvDOgCt7UriAPtKolwIhxpnSNI/yK+1B0= +go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o= +go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= +go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto 
v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20211108170745-6635138e15ea/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= 
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= 
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.46.2 h1:u+MLGgVf7vRdjEYZ8wDFhAVNmhkbJ5hmrA1LMWK1CAQ= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf 
v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/gotestsum v1.7.0/go.mod h1:V1m4Jw3eBerhI/A6qCxUE07RnCg7ACkKj9BYcAm09V8= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= 
+gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= 
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/receiver/flinkmetricsreceiver/integration_test.go b/receiver/flinkmetricsreceiver/integration_test.go
new file mode 100644
index 0000000000000..4f64a8f5a04bd
--- /dev/null
+++ b/receiver/flinkmetricsreceiver/integration_test.go
@@ -0,0 +1,206 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build integration
+// +build integration
+
+package flinkmetricsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver"
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+	"github.com/testcontainers/testcontainers-go"
+	"github.com/testcontainers/testcontainers-go/wait"
+	"go.opentelemetry.io/collector/component/componenttest"
+	"go.opentelemetry.io/collector/consumer/consumertest"
+
+	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest/golden"
+)
+
+func TestFlinkIntegration(t *testing.T) {
+	t.Parallel()
+	networkName := "new-network"
+	ctx := context.Background()
+	newNetwork, err := testcontainers.GenericNetwork(ctx, testcontainers.GenericNetworkRequest{
+		NetworkRequest: testcontainers.NetworkRequest{
+			Name:           networkName,
+			CheckDuplicate: true,
+		},
+	})
+	require.NoError(t, err)
+	defer func() {
+		require.NoError(t, newNetwork.Remove(ctx))
+	}()
+
+	masterContainer := getContainer(t, testcontainers.ContainerRequest{
+		FromDockerfile: testcontainers.FromDockerfile{
+			Context:    path.Join("testdata", "integration"),
+			Dockerfile: "Dockerfile.flink-master",
+		},
+		Hostname:     "flink-master",
+		Networks:     []string{networkName},
+		ExposedPorts: []string{"8080:8080", "8081:8081"},
+		WaitingFor:   waitStrategy{endpoint: "http://localhost:8081/jobmanager/metrics"},
+	})
+
+	workerContainer := getContainer(t, testcontainers.ContainerRequest{
+		FromDockerfile: testcontainers.FromDockerfile{
+			Context:    path.Join("testdata", "integration"),
+			Dockerfile: "Dockerfile.flink-worker",
+		},
+		Hostname: "worker",
+		Networks: []string{networkName},
+	})
+	defer func() {
+		require.NoError(t, masterContainer.Terminate(context.Background()))
+	}()
+	defer func() {
+		require.NoError(t, workerContainer.Terminate(context.Background()))
+	}()
+
+	hostname, err := masterContainer.Host(context.Background())
+	require.NoError(t, err)
+
+	// Required to start the taskmanager
+	ws := waitStrategy{endpoint: "http://localhost:8081/taskmanagers/metrics"}
+	err = ws.waitFor(context.Background(), "")
+	require.NoError(t, err)
+
+	// Required to start the StateMachineExample job
+	code, err := masterContainer.Exec(context.Background(), []string{"/setup.sh"})
+	require.NoError(t, err)
+	require.Equal(t, 0, code)
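+	// The running job is what populates the jobs, operator, and task metrics endpoints polled below.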
// Required to prevent empty value jobs call + ws = waitStrategy{endpoint: "http://localhost:8081/jobs"} + err = ws.waitFor(context.Background(), "") + require.NoError(t, err) + + // Required to prevent empty value for job, operator and task metrics call + ws = waitStrategy{endpoint: "http://localhost:8081/jobs/metrics"} + err = ws.waitFor(context.Background(), "") + require.NoError(t, err) + + // Override function to return deterministic field + defer func() { osHostname = os.Hostname }() + osHostname = func() (string, error) { return "job-localhost", nil } + + // Override function to return deterministic field + defer func() { taskmanagerHost = strings.Split }() + taskmanagerHost = func(id string, sep string) []string { return []string{"taskmanager-localhost"} } + + // Override function to return deterministic field, capturing the original value so the deferred restore is self-contained + origTaskmanagerID := taskmanagerID + defer func() { taskmanagerID = origTaskmanagerID }() + taskmanagerID = func(id string) string { return "taskmanagerID" } + + f := NewFactory() + cfg := f.CreateDefaultConfig().(*Config) + cfg.ScraperControllerSettings.CollectionInterval = 100 * time.Millisecond + cfg.Endpoint = fmt.Sprintf("http://%s", net.JoinHostPort(hostname, "8081")) + + consumer := new(consumertest.MetricsSink) + settings := componenttest.NewNopReceiverCreateSettings() + rcvr, err := f.CreateMetricsReceiver(context.Background(), settings, cfg, consumer) + require.NoError(t, err, "failed creating metrics receiver") + require.NoError(t, rcvr.Start(context.Background(), componenttest.NewNopHost())) + require.Eventuallyf(t, func() bool { + return consumer.DataPointCount() > 0 + }, 2*time.Minute, 1*time.Second, "failed to receive more than 0 metrics") + require.NoError(t, rcvr.Shutdown(context.Background())) + + actualMetrics := consumer.AllMetrics()[0] + expectedFile := filepath.Join("testdata", "integration", "expected.json") + expectedMetrics, err := golden.ReadMetrics(expectedFile) + require.NoError(t, err) + require.NoError(t, scrapertest.CompareMetrics(expectedMetrics, actualMetrics, scrapertest.IgnoreMetricValues())) +} + +func getContainer(t *testing.T, req testcontainers.ContainerRequest) testcontainers.Container { + require.NoError(t, req.Validate()) + container, err := testcontainers.GenericContainer( + context.Background(), + testcontainers.GenericContainerRequest{ + ContainerRequest: req, + Started: true, + }) + require.NoError(t, err) + return container +} + +type waitStrategy struct { + endpoint string +} + +func (ws waitStrategy) WaitUntilReady(ctx context.Context, st wait.StrategyTarget) error { + if err := wait.ForListeningPort("8081"). + WithStartupTimeout(2*time.Minute). + WaitUntilReady(ctx, st); err != nil { + return err + } + + hostname, err := st.Host(ctx) + if err != nil { + return err + } + + return ws.waitFor(ctx, hostname) +} + +// waitFor waits until an endpoint is ready with an id response.
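+// It polls the endpoint every 100 milliseconds and returns an error if no response containing "id" arrives within five seconds.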
+func (ws waitStrategy) waitFor(ctx context.Context, _ string) error { + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + + // Arm the timeout once, outside the loop: a time.After call inside the select would create a fresh timer on every tick, so the timeout branch would never fire. + timeout := time.After(5 * time.Second) + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-timeout: + return fmt.Errorf("server startup problem") + case <-ticker.C: + resp, err := http.Get(ws.endpoint) + if err != nil { + continue + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + continue + } + + if resp.Body.Close() != nil { + continue + } + + // The server needs a moment to generate some stats + if strings.Contains(string(body), "id") { + return nil + } + } + } +} diff --git a/receiver/flinkmetricsreceiver/internal/metadata/generated_metrics_v2.go b/receiver/flinkmetricsreceiver/internal/metadata/generated_metrics_v2.go new file mode 100644 index 0000000000000..f12d2f65e6d95 --- /dev/null +++ b/receiver/flinkmetricsreceiver/internal/metadata/generated_metrics_v2.go @@ -0,0 +1,2245 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "fmt" + "strconv" + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +// MetricSettings provides common settings for a particular metric. +type MetricSettings struct { + Enabled bool `mapstructure:"enabled"` +} + +// MetricsSettings provides settings for flinkmetricsreceiver metrics. +type MetricsSettings struct { + FlinkJobCheckpointCount MetricSettings `mapstructure:"flink.job.checkpoint.count"` + FlinkJobCheckpointInProgress MetricSettings `mapstructure:"flink.job.checkpoint.in_progress"` + FlinkJobLastCheckpointSize MetricSettings `mapstructure:"flink.job.last_checkpoint.size"` + FlinkJobLastCheckpointTime MetricSettings `mapstructure:"flink.job.last_checkpoint.time"` + FlinkJobRestartCount MetricSettings `mapstructure:"flink.job.restart.count"` + FlinkJvmClassLoaderClassesLoaded MetricSettings `mapstructure:"flink.jvm.class_loader.classes_loaded"` + FlinkJvmCPULoad MetricSettings `mapstructure:"flink.jvm.cpu.load"` + FlinkJvmCPUTime MetricSettings `mapstructure:"flink.jvm.cpu.time"` + FlinkJvmGcCollectionsCount MetricSettings `mapstructure:"flink.jvm.gc.collections.count"` + FlinkJvmGcCollectionsTime MetricSettings `mapstructure:"flink.jvm.gc.collections.time"` + FlinkJvmMemoryDirectTotalCapacity MetricSettings `mapstructure:"flink.jvm.memory.direct.total_capacity"` + FlinkJvmMemoryDirectUsed MetricSettings `mapstructure:"flink.jvm.memory.direct.used"` + FlinkJvmMemoryHeapCommitted MetricSettings `mapstructure:"flink.jvm.memory.heap.committed"` + FlinkJvmMemoryHeapMax MetricSettings `mapstructure:"flink.jvm.memory.heap.max"` + FlinkJvmMemoryHeapUsed MetricSettings `mapstructure:"flink.jvm.memory.heap.used"` + FlinkJvmMemoryMappedTotalCapacity MetricSettings `mapstructure:"flink.jvm.memory.mapped.total_capacity"` + FlinkJvmMemoryMappedUsed MetricSettings `mapstructure:"flink.jvm.memory.mapped.used"` + FlinkJvmMemoryMetaspaceCommitted MetricSettings `mapstructure:"flink.jvm.memory.metaspace.committed"` + FlinkJvmMemoryMetaspaceMax MetricSettings `mapstructure:"flink.jvm.memory.metaspace.max"` + FlinkJvmMemoryMetaspaceUsed MetricSettings `mapstructure:"flink.jvm.memory.metaspace.used"` + FlinkJvmMemoryNonheapCommitted MetricSettings `mapstructure:"flink.jvm.memory.nonheap.committed"` + FlinkJvmMemoryNonheapMax MetricSettings `mapstructure:"flink.jvm.memory.nonheap.max"` + FlinkJvmMemoryNonheapUsed MetricSettings `mapstructure:"flink.jvm.memory.nonheap.used"` +
FlinkJvmThreadsCount MetricSettings `mapstructure:"flink.jvm.threads.count"` + FlinkMemoryManagedTotal MetricSettings `mapstructure:"flink.memory.managed.total"` + FlinkMemoryManagedUsed MetricSettings `mapstructure:"flink.memory.managed.used"` + FlinkOperatorRecordCount MetricSettings `mapstructure:"flink.operator.record.count"` + FlinkOperatorWatermarkOutput MetricSettings `mapstructure:"flink.operator.watermark.output"` + FlinkTaskRecordCount MetricSettings `mapstructure:"flink.task.record.count"` +} + +func DefaultMetricsSettings() MetricsSettings { + return MetricsSettings{ + FlinkJobCheckpointCount: MetricSettings{ + Enabled: true, + }, + FlinkJobCheckpointInProgress: MetricSettings{ + Enabled: true, + }, + FlinkJobLastCheckpointSize: MetricSettings{ + Enabled: true, + }, + FlinkJobLastCheckpointTime: MetricSettings{ + Enabled: true, + }, + FlinkJobRestartCount: MetricSettings{ + Enabled: true, + }, + FlinkJvmClassLoaderClassesLoaded: MetricSettings{ + Enabled: true, + }, + FlinkJvmCPULoad: MetricSettings{ + Enabled: true, + }, + FlinkJvmCPUTime: MetricSettings{ + Enabled: true, + }, + FlinkJvmGcCollectionsCount: MetricSettings{ + Enabled: true, + }, + FlinkJvmGcCollectionsTime: MetricSettings{ + Enabled: true, + }, + FlinkJvmMemoryDirectTotalCapacity: MetricSettings{ + Enabled: true, + }, + FlinkJvmMemoryDirectUsed: MetricSettings{ + Enabled: true, + }, + FlinkJvmMemoryHeapCommitted: MetricSettings{ + Enabled: true, + }, + FlinkJvmMemoryHeapMax: MetricSettings{ + Enabled: true, + }, + FlinkJvmMemoryHeapUsed: MetricSettings{ + Enabled: true, + }, + FlinkJvmMemoryMappedTotalCapacity: MetricSettings{ + Enabled: true, + }, + FlinkJvmMemoryMappedUsed: MetricSettings{ + Enabled: true, + }, + FlinkJvmMemoryMetaspaceCommitted: MetricSettings{ + Enabled: true, + }, + FlinkJvmMemoryMetaspaceMax: MetricSettings{ + Enabled: true, + }, + FlinkJvmMemoryMetaspaceUsed: MetricSettings{ + Enabled: true, + }, + FlinkJvmMemoryNonheapCommitted: MetricSettings{ + Enabled: true, + }, + FlinkJvmMemoryNonheapMax: MetricSettings{ + Enabled: true, + }, + FlinkJvmMemoryNonheapUsed: MetricSettings{ + Enabled: true, + }, + FlinkJvmThreadsCount: MetricSettings{ + Enabled: true, + }, + FlinkMemoryManagedTotal: MetricSettings{ + Enabled: true, + }, + FlinkMemoryManagedUsed: MetricSettings{ + Enabled: true, + }, + FlinkOperatorRecordCount: MetricSettings{ + Enabled: true, + }, + FlinkOperatorWatermarkOutput: MetricSettings{ + Enabled: true, + }, + FlinkTaskRecordCount: MetricSettings{ + Enabled: true, + }, + } +} + +// AttributeCheckpoint specifies the a value checkpoint attribute. +type AttributeCheckpoint int + +const ( + _ AttributeCheckpoint = iota + AttributeCheckpointCompleted + AttributeCheckpointFailed +) + +// String returns the string representation of the AttributeCheckpoint. +func (av AttributeCheckpoint) String() string { + switch av { + case AttributeCheckpointCompleted: + return "completed" + case AttributeCheckpointFailed: + return "failed" + } + return "" +} + +// MapAttributeCheckpoint is a helper map of string to AttributeCheckpoint attribute value. +var MapAttributeCheckpoint = map[string]AttributeCheckpoint{ + "completed": AttributeCheckpointCompleted, + "failed": AttributeCheckpointFailed, +} + +// AttributeGarbageCollectorName specifies the a value garbage_collector_name attribute. 
+type AttributeGarbageCollectorName int + +const ( + _ AttributeGarbageCollectorName = iota + AttributeGarbageCollectorNamePSMarkSweep + AttributeGarbageCollectorNamePSScavenge + AttributeGarbageCollectorNameG1YoungGeneration + AttributeGarbageCollectorNameG1OldGeneration +) + +// String returns the string representation of the AttributeGarbageCollectorName. +func (av AttributeGarbageCollectorName) String() string { + switch av { + case AttributeGarbageCollectorNamePSMarkSweep: + return "PS_MarkSweep" + case AttributeGarbageCollectorNamePSScavenge: + return "PS_Scavenge" + case AttributeGarbageCollectorNameG1YoungGeneration: + return "G1_Young_Generation" + case AttributeGarbageCollectorNameG1OldGeneration: + return "G1_Old_Generation" + } + return "" +} + +// MapAttributeGarbageCollectorName is a helper map of string to AttributeGarbageCollectorName attribute value. +var MapAttributeGarbageCollectorName = map[string]AttributeGarbageCollectorName{ + "PS_MarkSweep": AttributeGarbageCollectorNamePSMarkSweep, + "PS_Scavenge": AttributeGarbageCollectorNamePSScavenge, + "G1_Young_Generation": AttributeGarbageCollectorNameG1YoungGeneration, + "G1_Old_Generation": AttributeGarbageCollectorNameG1OldGeneration, +} + +// AttributeRecord specifies the a value record attribute. +type AttributeRecord int + +const ( + _ AttributeRecord = iota + AttributeRecordIn + AttributeRecordOut + AttributeRecordDropped +) + +// String returns the string representation of the AttributeRecord. +func (av AttributeRecord) String() string { + switch av { + case AttributeRecordIn: + return "in" + case AttributeRecordOut: + return "out" + case AttributeRecordDropped: + return "dropped" + } + return "" +} + +// MapAttributeRecord is a helper map of string to AttributeRecord attribute value. +var MapAttributeRecord = map[string]AttributeRecord{ + "in": AttributeRecordIn, + "out": AttributeRecordOut, + "dropped": AttributeRecordDropped, +} + +type metricFlinkJobCheckpointCount struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.job.checkpoint.count metric with initial data. +func (m *metricFlinkJobCheckpointCount) init() { + m.data.SetName("flink.job.checkpoint.count") + m.data.SetDescription("The number of checkpoints completed or failed.") + m.data.SetUnit("{checkpoints}") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricFlinkJobCheckpointCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, checkpointAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) + dp.Attributes().Insert("checkpoint", pcommon.NewValueString(checkpointAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkJobCheckpointCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricFlinkJobCheckpointCount) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJobCheckpointCount(settings MetricSettings) metricFlinkJobCheckpointCount { + m := metricFlinkJobCheckpointCount{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkJobCheckpointInProgress struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.job.checkpoint.in_progress metric with initial data. +func (m *metricFlinkJobCheckpointInProgress) init() { + m.data.SetName("flink.job.checkpoint.in_progress") + m.data.SetDescription("The number of checkpoints in progress.") + m.data.SetUnit("{checkpoints}") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) +} + +func (m *metricFlinkJobCheckpointInProgress) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkJobCheckpointInProgress) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricFlinkJobCheckpointInProgress) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJobCheckpointInProgress(settings MetricSettings) metricFlinkJobCheckpointInProgress { + m := metricFlinkJobCheckpointInProgress{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkJobLastCheckpointSize struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.job.last_checkpoint.size metric with initial data. +func (m *metricFlinkJobLastCheckpointSize) init() { + m.data.SetName("flink.job.last_checkpoint.size") + m.data.SetDescription("The total size of the last checkpoint.") + m.data.SetUnit("By") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) +} + +func (m *metricFlinkJobLastCheckpointSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricFlinkJobLastCheckpointSize) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricFlinkJobLastCheckpointSize) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJobLastCheckpointSize(settings MetricSettings) metricFlinkJobLastCheckpointSize { + m := metricFlinkJobLastCheckpointSize{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkJobLastCheckpointTime struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.job.last_checkpoint.time metric with initial data. +func (m *metricFlinkJobLastCheckpointTime) init() { + m.data.SetName("flink.job.last_checkpoint.time") + m.data.SetDescription("The end to end duration of the last checkpoint.") + m.data.SetUnit("ms") + m.data.SetDataType(pmetric.MetricDataTypeGauge) +} + +func (m *metricFlinkJobLastCheckpointTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkJobLastCheckpointTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricFlinkJobLastCheckpointTime) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJobLastCheckpointTime(settings MetricSettings) metricFlinkJobLastCheckpointTime { + m := metricFlinkJobLastCheckpointTime{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkJobRestartCount struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.job.restart.count metric with initial data. 
+func (m *metricFlinkJobRestartCount) init() { + m.data.SetName("flink.job.restart.count") + m.data.SetDescription("The total number of restarts since this job was submitted, including full restarts and fine-grained restarts.") + m.data.SetUnit("{restarts}") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) +} + +func (m *metricFlinkJobRestartCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkJobRestartCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricFlinkJobRestartCount) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJobRestartCount(settings MetricSettings) metricFlinkJobRestartCount { + m := metricFlinkJobRestartCount{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkJvmClassLoaderClassesLoaded struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.jvm.class_loader.classes_loaded metric with initial data. +func (m *metricFlinkJvmClassLoaderClassesLoaded) init() { + m.data.SetName("flink.jvm.class_loader.classes_loaded") + m.data.SetDescription("The total number of classes loaded since the start of the JVM.") + m.data.SetUnit("{classes}") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) +} + +func (m *metricFlinkJvmClassLoaderClassesLoaded) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkJvmClassLoaderClassesLoaded) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricFlinkJvmClassLoaderClassesLoaded) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJvmClassLoaderClassesLoaded(settings MetricSettings) metricFlinkJvmClassLoaderClassesLoaded { + m := metricFlinkJvmClassLoaderClassesLoaded{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkJvmCPULoad struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.jvm.cpu.load metric with initial data. +func (m *metricFlinkJvmCPULoad) init() { + m.data.SetName("flink.jvm.cpu.load") + m.data.SetDescription("The CPU usage of the JVM for a jobmanager or taskmanager.") + m.data.SetUnit("%") + m.data.SetDataType(pmetric.MetricDataTypeGauge) +} + +func (m *metricFlinkJvmCPULoad) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkJvmCPULoad) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricFlinkJvmCPULoad) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJvmCPULoad(settings MetricSettings) metricFlinkJvmCPULoad { + m := metricFlinkJvmCPULoad{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkJvmCPUTime struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.jvm.cpu.time metric with initial data. +func (m *metricFlinkJvmCPUTime) init() { + m.data.SetName("flink.jvm.cpu.time") + m.data.SetDescription("The CPU time used by the JVM for a jobmanager or taskmanager.") + m.data.SetUnit("ns") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) +} + +func (m *metricFlinkJvmCPUTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkJvmCPUTime) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricFlinkJvmCPUTime) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJvmCPUTime(settings MetricSettings) metricFlinkJvmCPUTime { + m := metricFlinkJvmCPUTime{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkJvmGcCollectionsCount struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.jvm.gc.collections.count metric with initial data. +func (m *metricFlinkJvmGcCollectionsCount) init() { + m.data.SetName("flink.jvm.gc.collections.count") + m.data.SetDescription("The total number of collections that have occurred.") + m.data.SetUnit("{collections}") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricFlinkJvmGcCollectionsCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, garbageCollectorNameAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) + dp.Attributes().Insert("garbage_collector_name", pcommon.NewValueString(garbageCollectorNameAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkJvmGcCollectionsCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricFlinkJvmGcCollectionsCount) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJvmGcCollectionsCount(settings MetricSettings) metricFlinkJvmGcCollectionsCount { + m := metricFlinkJvmGcCollectionsCount{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkJvmGcCollectionsTime struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.jvm.gc.collections.time metric with initial data. 
+func (m *metricFlinkJvmGcCollectionsTime) init() { + m.data.SetName("flink.jvm.gc.collections.time") + m.data.SetDescription("The total time spent performing garbage collection.") + m.data.SetUnit("ms") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricFlinkJvmGcCollectionsTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, garbageCollectorNameAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) + dp.Attributes().Insert("garbage_collector_name", pcommon.NewValueString(garbageCollectorNameAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkJvmGcCollectionsTime) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricFlinkJvmGcCollectionsTime) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJvmGcCollectionsTime(settings MetricSettings) metricFlinkJvmGcCollectionsTime { + m := metricFlinkJvmGcCollectionsTime{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkJvmMemoryDirectTotalCapacity struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.jvm.memory.direct.total_capacity metric with initial data. +func (m *metricFlinkJvmMemoryDirectTotalCapacity) init() { + m.data.SetName("flink.jvm.memory.direct.total_capacity") + m.data.SetDescription("The total capacity of all buffers in the direct buffer pool.") + m.data.SetUnit("By") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) +} + +func (m *metricFlinkJvmMemoryDirectTotalCapacity) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkJvmMemoryDirectTotalCapacity) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricFlinkJvmMemoryDirectTotalCapacity) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJvmMemoryDirectTotalCapacity(settings MetricSettings) metricFlinkJvmMemoryDirectTotalCapacity { + m := metricFlinkJvmMemoryDirectTotalCapacity{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkJvmMemoryDirectUsed struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.jvm.memory.direct.used metric with initial data. +func (m *metricFlinkJvmMemoryDirectUsed) init() { + m.data.SetName("flink.jvm.memory.direct.used") + m.data.SetDescription("The amount of memory used by the JVM for the direct buffer pool.") + m.data.SetUnit("By") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) +} + +func (m *metricFlinkJvmMemoryDirectUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkJvmMemoryDirectUsed) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricFlinkJvmMemoryDirectUsed) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJvmMemoryDirectUsed(settings MetricSettings) metricFlinkJvmMemoryDirectUsed { + m := metricFlinkJvmMemoryDirectUsed{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkJvmMemoryHeapCommitted struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.jvm.memory.heap.committed metric with initial data. +func (m *metricFlinkJvmMemoryHeapCommitted) init() { + m.data.SetName("flink.jvm.memory.heap.committed") + m.data.SetDescription("The amount of heap memory guaranteed to be available to the JVM.") + m.data.SetUnit("By") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) +} + +func (m *metricFlinkJvmMemoryHeapCommitted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricFlinkJvmMemoryHeapCommitted) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricFlinkJvmMemoryHeapCommitted) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJvmMemoryHeapCommitted(settings MetricSettings) metricFlinkJvmMemoryHeapCommitted { + m := metricFlinkJvmMemoryHeapCommitted{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkJvmMemoryHeapMax struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.jvm.memory.heap.max metric with initial data. +func (m *metricFlinkJvmMemoryHeapMax) init() { + m.data.SetName("flink.jvm.memory.heap.max") + m.data.SetDescription("The maximum amount of heap memory that can be used for memory management.") + m.data.SetUnit("By") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) +} + +func (m *metricFlinkJvmMemoryHeapMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkJvmMemoryHeapMax) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricFlinkJvmMemoryHeapMax) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJvmMemoryHeapMax(settings MetricSettings) metricFlinkJvmMemoryHeapMax { + m := metricFlinkJvmMemoryHeapMax{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkJvmMemoryHeapUsed struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.jvm.memory.heap.used metric with initial data. 
+func (m *metricFlinkJvmMemoryHeapUsed) init() { + m.data.SetName("flink.jvm.memory.heap.used") + m.data.SetDescription("The amount of heap memory currently used.") + m.data.SetUnit("By") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) +} + +func (m *metricFlinkJvmMemoryHeapUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkJvmMemoryHeapUsed) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricFlinkJvmMemoryHeapUsed) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJvmMemoryHeapUsed(settings MetricSettings) metricFlinkJvmMemoryHeapUsed { + m := metricFlinkJvmMemoryHeapUsed{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkJvmMemoryMappedTotalCapacity struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.jvm.memory.mapped.total_capacity metric with initial data. +func (m *metricFlinkJvmMemoryMappedTotalCapacity) init() { + m.data.SetName("flink.jvm.memory.mapped.total_capacity") + m.data.SetDescription("The number of buffers in the mapped buffer pool.") + m.data.SetUnit("By") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) +} + +func (m *metricFlinkJvmMemoryMappedTotalCapacity) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkJvmMemoryMappedTotalCapacity) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricFlinkJvmMemoryMappedTotalCapacity) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJvmMemoryMappedTotalCapacity(settings MetricSettings) metricFlinkJvmMemoryMappedTotalCapacity { + m := metricFlinkJvmMemoryMappedTotalCapacity{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkJvmMemoryMappedUsed struct { + data pmetric.Metric // data buffer for generated metric. 
+ settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.jvm.memory.mapped.used metric with initial data. +func (m *metricFlinkJvmMemoryMappedUsed) init() { + m.data.SetName("flink.jvm.memory.mapped.used") + m.data.SetDescription("The amount of memory used by the JVM for the mapped buffer pool.") + m.data.SetUnit("By") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) +} + +func (m *metricFlinkJvmMemoryMappedUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkJvmMemoryMappedUsed) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricFlinkJvmMemoryMappedUsed) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJvmMemoryMappedUsed(settings MetricSettings) metricFlinkJvmMemoryMappedUsed { + m := metricFlinkJvmMemoryMappedUsed{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkJvmMemoryMetaspaceCommitted struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.jvm.memory.metaspace.committed metric with initial data. +func (m *metricFlinkJvmMemoryMetaspaceCommitted) init() { + m.data.SetName("flink.jvm.memory.metaspace.committed") + m.data.SetDescription("The amount of memory guaranteed to be available to the JVM in the Metaspace memory pool.") + m.data.SetUnit("By") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) +} + +func (m *metricFlinkJvmMemoryMetaspaceCommitted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkJvmMemoryMetaspaceCommitted) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricFlinkJvmMemoryMetaspaceCommitted) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJvmMemoryMetaspaceCommitted(settings MetricSettings) metricFlinkJvmMemoryMetaspaceCommitted { + m := metricFlinkJvmMemoryMetaspaceCommitted{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkJvmMemoryMetaspaceMax struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.jvm.memory.metaspace.max metric with initial data. +func (m *metricFlinkJvmMemoryMetaspaceMax) init() { + m.data.SetName("flink.jvm.memory.metaspace.max") + m.data.SetDescription("The maximum amount of memory that can be used in the Metaspace memory pool.") + m.data.SetUnit("By") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) +} + +func (m *metricFlinkJvmMemoryMetaspaceMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkJvmMemoryMetaspaceMax) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricFlinkJvmMemoryMetaspaceMax) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJvmMemoryMetaspaceMax(settings MetricSettings) metricFlinkJvmMemoryMetaspaceMax { + m := metricFlinkJvmMemoryMetaspaceMax{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkJvmMemoryMetaspaceUsed struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.jvm.memory.metaspace.used metric with initial data. +func (m *metricFlinkJvmMemoryMetaspaceUsed) init() { + m.data.SetName("flink.jvm.memory.metaspace.used") + m.data.SetDescription("The amount of memory currently used in the Metaspace memory pool.") + m.data.SetUnit("By") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) +} + +func (m *metricFlinkJvmMemoryMetaspaceUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricFlinkJvmMemoryMetaspaceUsed) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricFlinkJvmMemoryMetaspaceUsed) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJvmMemoryMetaspaceUsed(settings MetricSettings) metricFlinkJvmMemoryMetaspaceUsed { + m := metricFlinkJvmMemoryMetaspaceUsed{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkJvmMemoryNonheapCommitted struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.jvm.memory.nonheap.committed metric with initial data. +func (m *metricFlinkJvmMemoryNonheapCommitted) init() { + m.data.SetName("flink.jvm.memory.nonheap.committed") + m.data.SetDescription("The amount of non-heap memory guaranteed to be available to the JVM.") + m.data.SetUnit("By") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) +} + +func (m *metricFlinkJvmMemoryNonheapCommitted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkJvmMemoryNonheapCommitted) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricFlinkJvmMemoryNonheapCommitted) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJvmMemoryNonheapCommitted(settings MetricSettings) metricFlinkJvmMemoryNonheapCommitted { + m := metricFlinkJvmMemoryNonheapCommitted{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkJvmMemoryNonheapMax struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.jvm.memory.nonheap.max metric with initial data. 
+func (m *metricFlinkJvmMemoryNonheapMax) init() { + m.data.SetName("flink.jvm.memory.nonheap.max") + m.data.SetDescription("The maximum amount of non-heap memory that can be used for memory management.") + m.data.SetUnit("By") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) +} + +func (m *metricFlinkJvmMemoryNonheapMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkJvmMemoryNonheapMax) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricFlinkJvmMemoryNonheapMax) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJvmMemoryNonheapMax(settings MetricSettings) metricFlinkJvmMemoryNonheapMax { + m := metricFlinkJvmMemoryNonheapMax{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkJvmMemoryNonheapUsed struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.jvm.memory.nonheap.used metric with initial data. +func (m *metricFlinkJvmMemoryNonheapUsed) init() { + m.data.SetName("flink.jvm.memory.nonheap.used") + m.data.SetDescription("The amount of non-heap memory currently used.") + m.data.SetUnit("By") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) +} + +func (m *metricFlinkJvmMemoryNonheapUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkJvmMemoryNonheapUsed) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricFlinkJvmMemoryNonheapUsed) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJvmMemoryNonheapUsed(settings MetricSettings) metricFlinkJvmMemoryNonheapUsed { + m := metricFlinkJvmMemoryNonheapUsed{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkJvmThreadsCount struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. 
+ capacity int // max observed number of data points added to the metric. +} + +// init fills flink.jvm.threads.count metric with initial data. +func (m *metricFlinkJvmThreadsCount) init() { + m.data.SetName("flink.jvm.threads.count") + m.data.SetDescription("The total number of live threads.") + m.data.SetUnit("{threads}") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) +} + +func (m *metricFlinkJvmThreadsCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkJvmThreadsCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricFlinkJvmThreadsCount) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkJvmThreadsCount(settings MetricSettings) metricFlinkJvmThreadsCount { + m := metricFlinkJvmThreadsCount{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkMemoryManagedTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.memory.managed.total metric with initial data. +func (m *metricFlinkMemoryManagedTotal) init() { + m.data.SetName("flink.memory.managed.total") + m.data.SetDescription("The total amount of managed memory.") + m.data.SetUnit("By") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) +} + +func (m *metricFlinkMemoryManagedTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkMemoryManagedTotal) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricFlinkMemoryManagedTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkMemoryManagedTotal(settings MetricSettings) metricFlinkMemoryManagedTotal { + m := metricFlinkMemoryManagedTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkMemoryManagedUsed struct { + data pmetric.Metric // data buffer for generated metric. 
+ settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.memory.managed.used metric with initial data. +func (m *metricFlinkMemoryManagedUsed) init() { + m.data.SetName("flink.memory.managed.used") + m.data.SetDescription("The amount of managed memory currently used.") + m.data.SetUnit("By") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) +} + +func (m *metricFlinkMemoryManagedUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkMemoryManagedUsed) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricFlinkMemoryManagedUsed) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkMemoryManagedUsed(settings MetricSettings) metricFlinkMemoryManagedUsed { + m := metricFlinkMemoryManagedUsed{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkOperatorRecordCount struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.operator.record.count metric with initial data. +func (m *metricFlinkOperatorRecordCount) init() { + m.data.SetName("flink.operator.record.count") + m.data.SetDescription("The number of records an operator has.") + m.data.SetUnit("{records}") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricFlinkOperatorRecordCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, operatorNameAttributeValue string, recordAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) + dp.Attributes().Insert("operator_name", pcommon.NewValueString(operatorNameAttributeValue)) + dp.Attributes().Insert("record", pcommon.NewValueString(recordAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkOperatorRecordCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
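+// MoveTo transfers the accumulated data points into the output slice and resets m.data, so init() is invoked again to prepare an empty metric for the next scrape cycle, while the capacity saved by updateCapacity is reused via EnsureCapacity.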
+func (m *metricFlinkOperatorRecordCount) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkOperatorRecordCount(settings MetricSettings) metricFlinkOperatorRecordCount { + m := metricFlinkOperatorRecordCount{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkOperatorWatermarkOutput struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.operator.watermark.output metric with initial data. +func (m *metricFlinkOperatorWatermarkOutput) init() { + m.data.SetName("flink.operator.watermark.output") + m.data.SetDescription("The last watermark this operator has emitted.") + m.data.SetUnit("ms") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricFlinkOperatorWatermarkOutput) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, operatorNameAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) + dp.Attributes().Insert("operator_name", pcommon.NewValueString(operatorNameAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkOperatorWatermarkOutput) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricFlinkOperatorWatermarkOutput) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkOperatorWatermarkOutput(settings MetricSettings) metricFlinkOperatorWatermarkOutput { + m := metricFlinkOperatorWatermarkOutput{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricFlinkTaskRecordCount struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills flink.task.record.count metric with initial data. 
+func (m *metricFlinkTaskRecordCount) init() { + m.data.SetName("flink.task.record.count") + m.data.SetDescription("The number of records a task has.") + m.data.SetUnit("{records}") + m.data.SetDataType(pmetric.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricFlinkTaskRecordCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, recordAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) + dp.Attributes().Insert("record", pcommon.NewValueString(recordAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricFlinkTaskRecordCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricFlinkTaskRecordCount) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricFlinkTaskRecordCount(settings MetricSettings) metricFlinkTaskRecordCount { + m := metricFlinkTaskRecordCount{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations +// required to produce metric representation defined in metadata and user settings. +type MetricsBuilder struct { + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. 
+ buildInfo component.BuildInfo // contains version information + metricFlinkJobCheckpointCount metricFlinkJobCheckpointCount + metricFlinkJobCheckpointInProgress metricFlinkJobCheckpointInProgress + metricFlinkJobLastCheckpointSize metricFlinkJobLastCheckpointSize + metricFlinkJobLastCheckpointTime metricFlinkJobLastCheckpointTime + metricFlinkJobRestartCount metricFlinkJobRestartCount + metricFlinkJvmClassLoaderClassesLoaded metricFlinkJvmClassLoaderClassesLoaded + metricFlinkJvmCPULoad metricFlinkJvmCPULoad + metricFlinkJvmCPUTime metricFlinkJvmCPUTime + metricFlinkJvmGcCollectionsCount metricFlinkJvmGcCollectionsCount + metricFlinkJvmGcCollectionsTime metricFlinkJvmGcCollectionsTime + metricFlinkJvmMemoryDirectTotalCapacity metricFlinkJvmMemoryDirectTotalCapacity + metricFlinkJvmMemoryDirectUsed metricFlinkJvmMemoryDirectUsed + metricFlinkJvmMemoryHeapCommitted metricFlinkJvmMemoryHeapCommitted + metricFlinkJvmMemoryHeapMax metricFlinkJvmMemoryHeapMax + metricFlinkJvmMemoryHeapUsed metricFlinkJvmMemoryHeapUsed + metricFlinkJvmMemoryMappedTotalCapacity metricFlinkJvmMemoryMappedTotalCapacity + metricFlinkJvmMemoryMappedUsed metricFlinkJvmMemoryMappedUsed + metricFlinkJvmMemoryMetaspaceCommitted metricFlinkJvmMemoryMetaspaceCommitted + metricFlinkJvmMemoryMetaspaceMax metricFlinkJvmMemoryMetaspaceMax + metricFlinkJvmMemoryMetaspaceUsed metricFlinkJvmMemoryMetaspaceUsed + metricFlinkJvmMemoryNonheapCommitted metricFlinkJvmMemoryNonheapCommitted + metricFlinkJvmMemoryNonheapMax metricFlinkJvmMemoryNonheapMax + metricFlinkJvmMemoryNonheapUsed metricFlinkJvmMemoryNonheapUsed + metricFlinkJvmThreadsCount metricFlinkJvmThreadsCount + metricFlinkMemoryManagedTotal metricFlinkMemoryManagedTotal + metricFlinkMemoryManagedUsed metricFlinkMemoryManagedUsed + metricFlinkOperatorRecordCount metricFlinkOperatorRecordCount + metricFlinkOperatorWatermarkOutput metricFlinkOperatorWatermarkOutput + metricFlinkTaskRecordCount metricFlinkTaskRecordCount +} + +// metricBuilderOption applies changes to default metrics builder. +type metricBuilderOption func(*MetricsBuilder) + +// WithStartTime sets startTime on the metrics builder. 
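+// A usage sketch (processStart is an illustrative variable, not part of this package): +// NewMetricsBuilder(settings, buildInfo, WithStartTime(pcommon.NewTimestampFromTime(processStart))) +// stamps recorded data points with processStart instead of the builder's creation time.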
+func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { + return func(mb *MetricsBuilder) { + mb.startTime = startTime + } +} + +func NewMetricsBuilder(settings MetricsSettings, buildInfo component.BuildInfo, options ...metricBuilderOption) *MetricsBuilder { + mb := &MetricsBuilder{ + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), + buildInfo: buildInfo, + metricFlinkJobCheckpointCount: newMetricFlinkJobCheckpointCount(settings.FlinkJobCheckpointCount), + metricFlinkJobCheckpointInProgress: newMetricFlinkJobCheckpointInProgress(settings.FlinkJobCheckpointInProgress), + metricFlinkJobLastCheckpointSize: newMetricFlinkJobLastCheckpointSize(settings.FlinkJobLastCheckpointSize), + metricFlinkJobLastCheckpointTime: newMetricFlinkJobLastCheckpointTime(settings.FlinkJobLastCheckpointTime), + metricFlinkJobRestartCount: newMetricFlinkJobRestartCount(settings.FlinkJobRestartCount), + metricFlinkJvmClassLoaderClassesLoaded: newMetricFlinkJvmClassLoaderClassesLoaded(settings.FlinkJvmClassLoaderClassesLoaded), + metricFlinkJvmCPULoad: newMetricFlinkJvmCPULoad(settings.FlinkJvmCPULoad), + metricFlinkJvmCPUTime: newMetricFlinkJvmCPUTime(settings.FlinkJvmCPUTime), + metricFlinkJvmGcCollectionsCount: newMetricFlinkJvmGcCollectionsCount(settings.FlinkJvmGcCollectionsCount), + metricFlinkJvmGcCollectionsTime: newMetricFlinkJvmGcCollectionsTime(settings.FlinkJvmGcCollectionsTime), + metricFlinkJvmMemoryDirectTotalCapacity: newMetricFlinkJvmMemoryDirectTotalCapacity(settings.FlinkJvmMemoryDirectTotalCapacity), + metricFlinkJvmMemoryDirectUsed: newMetricFlinkJvmMemoryDirectUsed(settings.FlinkJvmMemoryDirectUsed), + metricFlinkJvmMemoryHeapCommitted: newMetricFlinkJvmMemoryHeapCommitted(settings.FlinkJvmMemoryHeapCommitted), + metricFlinkJvmMemoryHeapMax: newMetricFlinkJvmMemoryHeapMax(settings.FlinkJvmMemoryHeapMax), + metricFlinkJvmMemoryHeapUsed: newMetricFlinkJvmMemoryHeapUsed(settings.FlinkJvmMemoryHeapUsed), + metricFlinkJvmMemoryMappedTotalCapacity: newMetricFlinkJvmMemoryMappedTotalCapacity(settings.FlinkJvmMemoryMappedTotalCapacity), + metricFlinkJvmMemoryMappedUsed: newMetricFlinkJvmMemoryMappedUsed(settings.FlinkJvmMemoryMappedUsed), + metricFlinkJvmMemoryMetaspaceCommitted: newMetricFlinkJvmMemoryMetaspaceCommitted(settings.FlinkJvmMemoryMetaspaceCommitted), + metricFlinkJvmMemoryMetaspaceMax: newMetricFlinkJvmMemoryMetaspaceMax(settings.FlinkJvmMemoryMetaspaceMax), + metricFlinkJvmMemoryMetaspaceUsed: newMetricFlinkJvmMemoryMetaspaceUsed(settings.FlinkJvmMemoryMetaspaceUsed), + metricFlinkJvmMemoryNonheapCommitted: newMetricFlinkJvmMemoryNonheapCommitted(settings.FlinkJvmMemoryNonheapCommitted), + metricFlinkJvmMemoryNonheapMax: newMetricFlinkJvmMemoryNonheapMax(settings.FlinkJvmMemoryNonheapMax), + metricFlinkJvmMemoryNonheapUsed: newMetricFlinkJvmMemoryNonheapUsed(settings.FlinkJvmMemoryNonheapUsed), + metricFlinkJvmThreadsCount: newMetricFlinkJvmThreadsCount(settings.FlinkJvmThreadsCount), + metricFlinkMemoryManagedTotal: newMetricFlinkMemoryManagedTotal(settings.FlinkMemoryManagedTotal), + metricFlinkMemoryManagedUsed: newMetricFlinkMemoryManagedUsed(settings.FlinkMemoryManagedUsed), + metricFlinkOperatorRecordCount: newMetricFlinkOperatorRecordCount(settings.FlinkOperatorRecordCount), + metricFlinkOperatorWatermarkOutput: newMetricFlinkOperatorWatermarkOutput(settings.FlinkOperatorWatermarkOutput), + metricFlinkTaskRecordCount: newMetricFlinkTaskRecordCount(settings.FlinkTaskRecordCount), + } + for _, op := range options { + op(mb) + 
} + return mb +} + +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { + if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceMetricsOption applies changes to provided resource metrics. +type ResourceMetricsOption func(pmetric.ResourceMetrics) + +// WithFlinkJobName sets provided value as "flink.job.name" attribute for current resource. +func WithFlinkJobName(val string) ResourceMetricsOption { + return func(rm pmetric.ResourceMetrics) { + rm.Resource().Attributes().UpsertString("flink.job.name", val) + } +} + +// WithFlinkResourceType sets provided value as "flink.resource.type" attribute for current resource. +func WithFlinkResourceType(val string) ResourceMetricsOption { + return func(rm pmetric.ResourceMetrics) { + rm.Resource().Attributes().UpsertString("flink.resource.type", val) + } +} + +// WithFlinkSubtaskIndex sets provided value as "flink.subtask.index" attribute for current resource. +func WithFlinkSubtaskIndex(val string) ResourceMetricsOption { + return func(rm pmetric.ResourceMetrics) { + rm.Resource().Attributes().UpsertString("flink.subtask.index", val) + } +} + +// WithFlinkTaskName sets provided value as "flink.task.name" attribute for current resource. +func WithFlinkTaskName(val string) ResourceMetricsOption { + return func(rm pmetric.ResourceMetrics) { + rm.Resource().Attributes().UpsertString("flink.task.name", val) + } +} + +// WithFlinkTaskmanagerID sets provided value as "flink.taskmanager.id" attribute for current resource. +func WithFlinkTaskmanagerID(val string) ResourceMetricsOption { + return func(rm pmetric.ResourceMetrics) { + rm.Resource().Attributes().UpsertString("flink.taskmanager.id", val) + } +} + +// WithHostName sets provided value as "host.name" attribute for current resource. +func WithHostName(val string) ResourceMetricsOption { + return func(rm pmetric.ResourceMetrics) { + rm.Resource().Attributes().UpsertString("host.name", val) + } +} + +// WithStartTimeOverride overrides start time for all the resource metrics data points. +// This option should be only used if different start time has to be set on metrics coming from different resources. +func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { + return func(rm pmetric.ResourceMetrics) { + var dps pmetric.NumberDataPointSlice + metrics := rm.ScopeMetrics().At(0).Metrics() + for i := 0; i < metrics.Len(); i++ { + switch metrics.At(i).DataType() { + case pmetric.MetricDataTypeGauge: + dps = metrics.At(i).Gauge().DataPoints() + case pmetric.MetricDataTypeSum: + dps = metrics.At(i).Sum().DataPoints() + } + for j := 0; j < dps.Len(); j++ { + dps.At(j).SetStartTimestamp(start) + } + } + } +} + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. +// Resource attributes should be provided as ResourceMetricsOption arguments. 
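+// +// A usage sketch (attribute values are illustrative): +// +// _ = mb.RecordFlinkJobRestartCountDataPoint(now, "3") +// mb.EmitForResource(WithHostName("worker-0"), WithFlinkJobName("example-job"))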
+func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { + rm := pmetric.NewResourceMetrics() + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + ils := rm.ScopeMetrics().AppendEmpty() + ils.Scope().SetName("otelcol/flinkmetricsreceiver") + ils.Scope().SetVersion(mb.buildInfo.Version) + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricFlinkJobCheckpointCount.emit(ils.Metrics()) + mb.metricFlinkJobCheckpointInProgress.emit(ils.Metrics()) + mb.metricFlinkJobLastCheckpointSize.emit(ils.Metrics()) + mb.metricFlinkJobLastCheckpointTime.emit(ils.Metrics()) + mb.metricFlinkJobRestartCount.emit(ils.Metrics()) + mb.metricFlinkJvmClassLoaderClassesLoaded.emit(ils.Metrics()) + mb.metricFlinkJvmCPULoad.emit(ils.Metrics()) + mb.metricFlinkJvmCPUTime.emit(ils.Metrics()) + mb.metricFlinkJvmGcCollectionsCount.emit(ils.Metrics()) + mb.metricFlinkJvmGcCollectionsTime.emit(ils.Metrics()) + mb.metricFlinkJvmMemoryDirectTotalCapacity.emit(ils.Metrics()) + mb.metricFlinkJvmMemoryDirectUsed.emit(ils.Metrics()) + mb.metricFlinkJvmMemoryHeapCommitted.emit(ils.Metrics()) + mb.metricFlinkJvmMemoryHeapMax.emit(ils.Metrics()) + mb.metricFlinkJvmMemoryHeapUsed.emit(ils.Metrics()) + mb.metricFlinkJvmMemoryMappedTotalCapacity.emit(ils.Metrics()) + mb.metricFlinkJvmMemoryMappedUsed.emit(ils.Metrics()) + mb.metricFlinkJvmMemoryMetaspaceCommitted.emit(ils.Metrics()) + mb.metricFlinkJvmMemoryMetaspaceMax.emit(ils.Metrics()) + mb.metricFlinkJvmMemoryMetaspaceUsed.emit(ils.Metrics()) + mb.metricFlinkJvmMemoryNonheapCommitted.emit(ils.Metrics()) + mb.metricFlinkJvmMemoryNonheapMax.emit(ils.Metrics()) + mb.metricFlinkJvmMemoryNonheapUsed.emit(ils.Metrics()) + mb.metricFlinkJvmThreadsCount.emit(ils.Metrics()) + mb.metricFlinkMemoryManagedTotal.emit(ils.Metrics()) + mb.metricFlinkMemoryManagedUsed.emit(ils.Metrics()) + mb.metricFlinkOperatorRecordCount.emit(ils.Metrics()) + mb.metricFlinkOperatorWatermarkOutput.emit(ils.Metrics()) + mb.metricFlinkTaskRecordCount.emit(ils.Metrics()) + for _, op := range rmo { + op(rm) + } + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user settings, e.g. delta or cumulative. +func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics { + mb.EmitForResource(rmo...) + metrics := pmetric.NewMetrics() + mb.metricsBuffer.MoveTo(metrics) + return metrics +} + +// RecordFlinkJobCheckpointCountDataPoint adds a data point to flink.job.checkpoint.count metric. +func (mb *MetricsBuilder) RecordFlinkJobCheckpointCountDataPoint(ts pcommon.Timestamp, inputVal string, checkpointAttributeValue AttributeCheckpoint) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkJobCheckpointCount, value was %s: %w", inputVal, err) + } + mb.metricFlinkJobCheckpointCount.recordDataPoint(mb.startTime, ts, val, checkpointAttributeValue.String()) + return nil +} + +// RecordFlinkJobCheckpointInProgressDataPoint adds a data point to flink.job.checkpoint.in_progress metric. 
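+// Like the other Record*DataPoint helpers below, it accepts the raw string value reported by +// Flink's REST API, parses it to an int64, and returns an error for malformed input instead of +// recording a data point.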
+func (mb *MetricsBuilder) RecordFlinkJobCheckpointInProgressDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkJobCheckpointInProgress, value was %s: %w", inputVal, err) + } + mb.metricFlinkJobCheckpointInProgress.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordFlinkJobLastCheckpointSizeDataPoint adds a data point to flink.job.last_checkpoint.size metric. +func (mb *MetricsBuilder) RecordFlinkJobLastCheckpointSizeDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkJobLastCheckpointSize, value was %s: %w", inputVal, err) + } + mb.metricFlinkJobLastCheckpointSize.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordFlinkJobLastCheckpointTimeDataPoint adds a data point to flink.job.last_checkpoint.time metric. +func (mb *MetricsBuilder) RecordFlinkJobLastCheckpointTimeDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkJobLastCheckpointTime, value was %s: %w", inputVal, err) + } + mb.metricFlinkJobLastCheckpointTime.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordFlinkJobRestartCountDataPoint adds a data point to flink.job.restart.count metric. +func (mb *MetricsBuilder) RecordFlinkJobRestartCountDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkJobRestartCount, value was %s: %w", inputVal, err) + } + mb.metricFlinkJobRestartCount.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordFlinkJvmClassLoaderClassesLoadedDataPoint adds a data point to flink.jvm.class_loader.classes_loaded metric. +func (mb *MetricsBuilder) RecordFlinkJvmClassLoaderClassesLoadedDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkJvmClassLoaderClassesLoaded, value was %s: %w", inputVal, err) + } + mb.metricFlinkJvmClassLoaderClassesLoaded.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordFlinkJvmCPULoadDataPoint adds a data point to flink.jvm.cpu.load metric. +func (mb *MetricsBuilder) RecordFlinkJvmCPULoadDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseFloat(inputVal, 64) + if err != nil { + return fmt.Errorf("failed to parse float64 for FlinkJvmCPULoad, value was %s: %w", inputVal, err) + } + mb.metricFlinkJvmCPULoad.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordFlinkJvmCPUTimeDataPoint adds a data point to flink.jvm.cpu.time metric. +func (mb *MetricsBuilder) RecordFlinkJvmCPUTimeDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkJvmCPUTime, value was %s: %w", inputVal, err) + } + mb.metricFlinkJvmCPUTime.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordFlinkJvmGcCollectionsCountDataPoint adds a data point to flink.jvm.gc.collections.count metric. 
+func (mb *MetricsBuilder) RecordFlinkJvmGcCollectionsCountDataPoint(ts pcommon.Timestamp, inputVal string, garbageCollectorNameAttributeValue AttributeGarbageCollectorName) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkJvmGcCollectionsCount, value was %s: %w", inputVal, err) + } + mb.metricFlinkJvmGcCollectionsCount.recordDataPoint(mb.startTime, ts, val, garbageCollectorNameAttributeValue.String()) + return nil +} + +// RecordFlinkJvmGcCollectionsTimeDataPoint adds a data point to flink.jvm.gc.collections.time metric. +func (mb *MetricsBuilder) RecordFlinkJvmGcCollectionsTimeDataPoint(ts pcommon.Timestamp, inputVal string, garbageCollectorNameAttributeValue AttributeGarbageCollectorName) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkJvmGcCollectionsTime, value was %s: %w", inputVal, err) + } + mb.metricFlinkJvmGcCollectionsTime.recordDataPoint(mb.startTime, ts, val, garbageCollectorNameAttributeValue.String()) + return nil +} + +// RecordFlinkJvmMemoryDirectTotalCapacityDataPoint adds a data point to flink.jvm.memory.direct.total_capacity metric. +func (mb *MetricsBuilder) RecordFlinkJvmMemoryDirectTotalCapacityDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkJvmMemoryDirectTotalCapacity, value was %s: %w", inputVal, err) + } + mb.metricFlinkJvmMemoryDirectTotalCapacity.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordFlinkJvmMemoryDirectUsedDataPoint adds a data point to flink.jvm.memory.direct.used metric. +func (mb *MetricsBuilder) RecordFlinkJvmMemoryDirectUsedDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkJvmMemoryDirectUsed, value was %s: %w", inputVal, err) + } + mb.metricFlinkJvmMemoryDirectUsed.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordFlinkJvmMemoryHeapCommittedDataPoint adds a data point to flink.jvm.memory.heap.committed metric. +func (mb *MetricsBuilder) RecordFlinkJvmMemoryHeapCommittedDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkJvmMemoryHeapCommitted, value was %s: %w", inputVal, err) + } + mb.metricFlinkJvmMemoryHeapCommitted.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordFlinkJvmMemoryHeapMaxDataPoint adds a data point to flink.jvm.memory.heap.max metric. +func (mb *MetricsBuilder) RecordFlinkJvmMemoryHeapMaxDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkJvmMemoryHeapMax, value was %s: %w", inputVal, err) + } + mb.metricFlinkJvmMemoryHeapMax.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordFlinkJvmMemoryHeapUsedDataPoint adds a data point to flink.jvm.memory.heap.used metric. 
+func (mb *MetricsBuilder) RecordFlinkJvmMemoryHeapUsedDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkJvmMemoryHeapUsed, value was %s: %w", inputVal, err) + } + mb.metricFlinkJvmMemoryHeapUsed.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordFlinkJvmMemoryMappedTotalCapacityDataPoint adds a data point to flink.jvm.memory.mapped.total_capacity metric. +func (mb *MetricsBuilder) RecordFlinkJvmMemoryMappedTotalCapacityDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkJvmMemoryMappedTotalCapacity, value was %s: %w", inputVal, err) + } + mb.metricFlinkJvmMemoryMappedTotalCapacity.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordFlinkJvmMemoryMappedUsedDataPoint adds a data point to flink.jvm.memory.mapped.used metric. +func (mb *MetricsBuilder) RecordFlinkJvmMemoryMappedUsedDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkJvmMemoryMappedUsed, value was %s: %w", inputVal, err) + } + mb.metricFlinkJvmMemoryMappedUsed.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordFlinkJvmMemoryMetaspaceCommittedDataPoint adds a data point to flink.jvm.memory.metaspace.committed metric. +func (mb *MetricsBuilder) RecordFlinkJvmMemoryMetaspaceCommittedDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkJvmMemoryMetaspaceCommitted, value was %s: %w", inputVal, err) + } + mb.metricFlinkJvmMemoryMetaspaceCommitted.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordFlinkJvmMemoryMetaspaceMaxDataPoint adds a data point to flink.jvm.memory.metaspace.max metric. +func (mb *MetricsBuilder) RecordFlinkJvmMemoryMetaspaceMaxDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkJvmMemoryMetaspaceMax, value was %s: %w", inputVal, err) + } + mb.metricFlinkJvmMemoryMetaspaceMax.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordFlinkJvmMemoryMetaspaceUsedDataPoint adds a data point to flink.jvm.memory.metaspace.used metric. +func (mb *MetricsBuilder) RecordFlinkJvmMemoryMetaspaceUsedDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkJvmMemoryMetaspaceUsed, value was %s: %w", inputVal, err) + } + mb.metricFlinkJvmMemoryMetaspaceUsed.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordFlinkJvmMemoryNonheapCommittedDataPoint adds a data point to flink.jvm.memory.nonheap.committed metric. +func (mb *MetricsBuilder) RecordFlinkJvmMemoryNonheapCommittedDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkJvmMemoryNonheapCommitted, value was %s: %w", inputVal, err) + } + mb.metricFlinkJvmMemoryNonheapCommitted.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordFlinkJvmMemoryNonheapMaxDataPoint adds a data point to flink.jvm.memory.nonheap.max metric. 
+func (mb *MetricsBuilder) RecordFlinkJvmMemoryNonheapMaxDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkJvmMemoryNonheapMax, value was %s: %w", inputVal, err) + } + mb.metricFlinkJvmMemoryNonheapMax.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordFlinkJvmMemoryNonheapUsedDataPoint adds a data point to flink.jvm.memory.nonheap.used metric. +func (mb *MetricsBuilder) RecordFlinkJvmMemoryNonheapUsedDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkJvmMemoryNonheapUsed, value was %s: %w", inputVal, err) + } + mb.metricFlinkJvmMemoryNonheapUsed.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordFlinkJvmThreadsCountDataPoint adds a data point to flink.jvm.threads.count metric. +func (mb *MetricsBuilder) RecordFlinkJvmThreadsCountDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkJvmThreadsCount, value was %s: %w", inputVal, err) + } + mb.metricFlinkJvmThreadsCount.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordFlinkMemoryManagedTotalDataPoint adds a data point to flink.memory.managed.total metric. +func (mb *MetricsBuilder) RecordFlinkMemoryManagedTotalDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkMemoryManagedTotal, value was %s: %w", inputVal, err) + } + mb.metricFlinkMemoryManagedTotal.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordFlinkMemoryManagedUsedDataPoint adds a data point to flink.memory.managed.used metric. +func (mb *MetricsBuilder) RecordFlinkMemoryManagedUsedDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkMemoryManagedUsed, value was %s: %w", inputVal, err) + } + mb.metricFlinkMemoryManagedUsed.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordFlinkOperatorRecordCountDataPoint adds a data point to flink.operator.record.count metric. +func (mb *MetricsBuilder) RecordFlinkOperatorRecordCountDataPoint(ts pcommon.Timestamp, inputVal string, operatorNameAttributeValue string, recordAttributeValue AttributeRecord) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkOperatorRecordCount, value was %s: %w", inputVal, err) + } + mb.metricFlinkOperatorRecordCount.recordDataPoint(mb.startTime, ts, val, operatorNameAttributeValue, recordAttributeValue.String()) + return nil +} + +// RecordFlinkOperatorWatermarkOutputDataPoint adds a data point to flink.operator.watermark.output metric. 
+func (mb *MetricsBuilder) RecordFlinkOperatorWatermarkOutputDataPoint(ts pcommon.Timestamp, inputVal string, operatorNameAttributeValue string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkOperatorWatermarkOutput, value was %s: %w", inputVal, err) + } + mb.metricFlinkOperatorWatermarkOutput.recordDataPoint(mb.startTime, ts, val, operatorNameAttributeValue) + return nil +} + +// RecordFlinkTaskRecordCountDataPoint adds a data point to flink.task.record.count metric. +func (mb *MetricsBuilder) RecordFlinkTaskRecordCountDataPoint(ts pcommon.Timestamp, inputVal string, recordAttributeValue AttributeRecord) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for FlinkTaskRecordCount, value was %s: %w", inputVal, err) + } + mb.metricFlinkTaskRecordCount.recordDataPoint(mb.startTime, ts, val, recordAttributeValue.String()) + return nil +} + +// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, +// and metrics builder should update its startTime and reset it's internal state accordingly. +func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) + for _, op := range options { + op(mb) + } +} diff --git a/receiver/flinkmetricsreceiver/internal/mocks/client.go b/receiver/flinkmetricsreceiver/internal/mocks/client.go new file mode 100644 index 0000000000000..c1fca7127143b --- /dev/null +++ b/receiver/flinkmetricsreceiver/internal/mocks/client.go @@ -0,0 +1,108 @@ +// Code generated by mockery v2.9.4. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + + models "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver/internal/models" +) + +// MockClient is an autogenerated mock type for the client type +type MockClient struct { + mock.Mock +} + +// GetJobmanagerMetrics provides a mock function with given fields: ctx +func (_m *MockClient) GetJobmanagerMetrics(ctx context.Context) (*models.JobmanagerMetrics, error) { + ret := _m.Called(ctx) + + var r0 *models.JobmanagerMetrics + if rf, ok := ret.Get(0).(func(context.Context) *models.JobmanagerMetrics); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.JobmanagerMetrics) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetJobsMetrics provides a mock function with given fields: ctx +func (_m *MockClient) GetJobsMetrics(ctx context.Context) ([]*models.JobMetrics, error) { + ret := _m.Called(ctx) + + var r0 []*models.JobMetrics + if rf, ok := ret.Get(0).(func(context.Context) []*models.JobMetrics); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.JobMetrics) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetSubtasksMetrics provides a mock function with given fields: ctx +func (_m *MockClient) GetSubtasksMetrics(ctx context.Context) ([]*models.SubtaskMetrics, error) { + ret := _m.Called(ctx) + + var r0 []*models.SubtaskMetrics + if rf, ok := ret.Get(0).(func(context.Context) []*models.SubtaskMetrics); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.SubtaskMetrics) + } 
+ } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTaskmanagersMetrics provides a mock function with given fields: ctx +func (_m *MockClient) GetTaskmanagersMetrics(ctx context.Context) ([]*models.TaskmanagerMetrics, error) { + ret := _m.Called(ctx) + + var r0 []*models.TaskmanagerMetrics + if rf, ok := ret.Get(0).(func(context.Context) []*models.TaskmanagerMetrics); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.TaskmanagerMetrics) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/receiver/flinkmetricsreceiver/internal/models/model.go b/receiver/flinkmetricsreceiver/internal/models/model.go new file mode 100644 index 0000000000000..820f8d1375ce8 --- /dev/null +++ b/receiver/flinkmetricsreceiver/internal/models/model.go @@ -0,0 +1,48 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package models // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver/internal/models" + +// These are processed metrics that are used to uniquely identify metrics from each scope source. +// See https://nightlies.apache.org/flink/flink-docs-release-1.14/docs/ops/metrics/#system-scope + +// JobmanagerMetrics stores metrics with associated identifier attributes. +type JobmanagerMetrics struct { + Host string + Metrics MetricsResponse +} + +// TaskmanagerMetrics stores metrics with associated identifier attributes. +type TaskmanagerMetrics struct { + Host string + TaskmanagerID string + Metrics MetricsResponse +} + +// JobMetrics stores metrics with associated identifier attributes. +type JobMetrics struct { + Host string + JobName string + Metrics MetricsResponse +} + +// SubtaskMetrics stores metrics with associated identifier attributes. +type SubtaskMetrics struct { + Host string + TaskmanagerID string + JobName string + TaskName string + SubtaskIndex string + Metrics MetricsResponse +} diff --git a/receiver/flinkmetricsreceiver/internal/models/response_model.go b/receiver/flinkmetricsreceiver/internal/models/response_model.go new file mode 100644 index 0000000000000..75d9222f8cc66 --- /dev/null +++ b/receiver/flinkmetricsreceiver/internal/models/response_model.go @@ -0,0 +1,64 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package models // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver/internal/models" + +// MetricsResponse stores a response for the metric endpoints. +type MetricsResponse []struct { + ID string `json:"id"` + Value string `json:"value"` +} + +// TaskmanagerIDsResponse stores a response for the taskmanagers endpoint. +type TaskmanagerIDsResponse struct { + Taskmanagers []struct { + ID string `json:"id"` + } `json:"taskmanagers"` +} + +// JobOverviewResponse stores a response for the jobs overview endpoint. +type JobOverviewResponse struct { + Jobs []struct { + Jid string `json:"jid"` + Name string `json:"name"` + } `json:"jobs"` +} + +// JobsResponse stores a response for the jobs endpoint. +type JobsResponse struct { + Jobs []struct { + ID string `json:"id"` + } `json:"jobs"` +} + +// JobsWithIDResponse stores a response for the jobs with ID endpoint. +type JobsWithIDResponse struct { + Jid string `json:"jid"` + Name string `json:"name"` + Vertices []struct { + ID string `json:"id"` + Name string `json:"name"` + } `json:"vertices"` +} + +// VerticesResponse stores a response for the vertices endpoint. +type VerticesResponse struct { + ID string `json:"id"` + Name string `json:"name"` + Subtasks []struct { + Subtask int `json:"subtask"` + Host string `json:"host"` + TaskmanagerID string `json:"taskmanager-id"` + } `json:"subtasks"` +} diff --git a/receiver/flinkmetricsreceiver/metadata.yaml b/receiver/flinkmetricsreceiver/metadata.yaml new file mode 100644 index 0000000000000..5cf5da0f414c5 --- /dev/null +++ b/receiver/flinkmetricsreceiver/metadata.yaml @@ -0,0 +1,328 @@ +name: flinkmetricsreceiver + +resource_attributes: + # These resource attributes are Flink's system scope variables, which contain context information about metrics. These are required to uniquely identify incoming metrics, as the same job can run multiple times concurrently. See https://nightlies.apache.org/flink/flink-docs-release-1.14/docs/ops/metrics/#system-scope for more information. + host.name: + description: The host name. + type: string + flink.taskmanager.id: + description: The taskmanager ID. + type: string + flink.job.name: + description: The job name. + type: string + flink.task.name: + description: The task name. + type: string + flink.subtask.index: + description: The subtask index. + type: string + flink.resource.type: + description: The Flink scope type to which a metric belongs. + type: string + enum: [ jobmanager, taskmanager ] + +attributes: + operator_name: + description: The operator name. + type: string + garbage_collector_name: + description: The name of the garbage collector, covering the Parallel Scavenge and Garbage-First (G1) collectors. + type: string + enum: [ PS_MarkSweep, PS_Scavenge, G1_Young_Generation, G1_Old_Generation ] + checkpoint: + description: Whether the checkpoint completed or failed. + type: string + enum: [ completed, failed ] + record: + description: Whether the record was received (in), sent out (out), or dropped due to arriving late. + type: string + enum: [ in, out, dropped ] + +metrics: + flink.jvm.cpu.load: + enabled: true + description: The CPU usage of the JVM for a jobmanager or taskmanager. + unit: "%" + gauge: + value_type: double + input_type: string + attributes: [] + flink.jvm.cpu.time: + enabled: true + description: The CPU time used by the JVM for a jobmanager or taskmanager.
+ unit: ns + sum: + monotonic: true + aggregation: cumulative + value_type: int + input_type: string + attributes: [] + flink.jvm.memory.heap.used: + enabled: true + description: The amount of heap memory currently used. + unit: By + sum: + monotonic: false + aggregation: cumulative + value_type: int + input_type: string + attributes: [] + flink.jvm.memory.heap.committed: + enabled: true + description: The amount of heap memory guaranteed to be available to the JVM. + unit: By + sum: + monotonic: false + aggregation: cumulative + value_type: int + input_type: string + attributes: [] + flink.jvm.memory.heap.max: + enabled: true + description: The maximum amount of heap memory that can be used for memory management. + unit: By + sum: + monotonic: false + aggregation: cumulative + value_type: int + input_type: string + attributes: [] + flink.jvm.memory.nonheap.used: + enabled: true + description: The amount of non-heap memory currently used. + unit: By + sum: + monotonic: false + aggregation: cumulative + value_type: int + input_type: string + attributes: [] + flink.jvm.memory.nonheap.committed: + enabled: true + description: The amount of non-heap memory guaranteed to be available to the JVM. + unit: By + sum: + monotonic: false + aggregation: cumulative + value_type: int + input_type: string + attributes: [] + flink.jvm.memory.nonheap.max: + enabled: true + description: The maximum amount of non-heap memory that can be used for memory management. + unit: By + sum: + monotonic: false + aggregation: cumulative + value_type: int + input_type: string + attributes: [] + flink.jvm.memory.metaspace.used: + enabled: true + description: The amount of memory currently used in the Metaspace memory pool. + unit: By + sum: + monotonic: false + aggregation: cumulative + value_type: int + input_type: string + attributes: [] + flink.jvm.memory.metaspace.committed: + enabled: true + description: The amount of memory guaranteed to be available to the JVM in the Metaspace memory pool. + unit: By + sum: + monotonic: false + aggregation: cumulative + value_type: int + input_type: string + attributes: [] + flink.jvm.memory.metaspace.max: + enabled: true + description: The maximum amount of memory that can be used in the Metaspace memory pool. + unit: By + sum: + monotonic: false + aggregation: cumulative + value_type: int + input_type: string + attributes: [] + flink.jvm.memory.direct.used: + enabled: true + description: The amount of memory used by the JVM for the direct buffer pool. + unit: By + sum: + monotonic: false + aggregation: cumulative + value_type: int + input_type: string + attributes: [] + flink.jvm.memory.direct.total_capacity: + enabled: true + description: The total capacity of all buffers in the direct buffer pool. + unit: By + sum: + monotonic: false + aggregation: cumulative + value_type: int + input_type: string + attributes: [] + flink.jvm.memory.mapped.used: + enabled: true + description: The amount of memory used by the JVM for the mapped buffer pool. + unit: By + sum: + monotonic: false + aggregation: cumulative + value_type: int + input_type: string + attributes: [] + flink.jvm.memory.mapped.total_capacity: + enabled: true + description: The total capacity of all buffers in the mapped buffer pool. + unit: By + sum: + monotonic: false + aggregation: cumulative + value_type: int + input_type: string + attributes: [] + flink.memory.managed.used: + enabled: true + description: The amount of managed memory currently used.
+ unit: By + sum: + monotonic: false + aggregation: cumulative + value_type: int + input_type: string + attributes: [] + flink.memory.managed.total: + enabled: true + description: The total amount of managed memory. + unit: By + sum: + monotonic: false + aggregation: cumulative + value_type: int + input_type: string + attributes: [] + flink.jvm.threads.count: + enabled: true + description: The total number of live threads. + unit: "{threads}" + sum: + monotonic: false + aggregation: cumulative + value_type: int + input_type: string + attributes: [] + flink.jvm.gc.collections.count: + enabled: true + description: The total number of collections that have occurred. + unit: "{collections}" + sum: + monotonic: true + aggregation: cumulative + value_type: int + input_type: string + attributes: [ garbage_collector_name ] + flink.jvm.gc.collections.time: + enabled: true + description: The total time spent performing garbage collection. + unit: ms + sum: + monotonic: true + aggregation: cumulative + value_type: int + input_type: string + attributes: [ garbage_collector_name ] + flink.jvm.class_loader.classes_loaded: + enabled: true + description: The total number of classes loaded since the start of the JVM. + unit: "{classes}" + sum: + monotonic: true + aggregation: cumulative + value_type: int + input_type: string + attributes: [] + flink.job.restart.count: + enabled: true + description: The total number of restarts since this job was submitted, including full restarts and fine-grained restarts. + unit: "{restarts}" + sum: + monotonic: true + aggregation: cumulative + value_type: int + input_type: string + attributes: [] + flink.job.last_checkpoint.time: + enabled: true + description: The end to end duration of the last checkpoint. + unit: ms + gauge: + value_type: int + input_type: string + attributes: [] + flink.job.last_checkpoint.size: + enabled: true + description: The total size of the last checkpoint. + unit: By + sum: + monotonic: false + aggregation: cumulative + value_type: int + input_type: string + attributes: [] + flink.job.checkpoint.count: + enabled: true + description: The number of checkpoints completed or failed. + unit: "{checkpoints}" + sum: + monotonic: true + aggregation: cumulative + value_type: int + input_type: string + attributes: [ checkpoint ] + flink.job.checkpoint.in_progress: + enabled: true + description: The number of checkpoints in progress. + unit: "{checkpoints}" + sum: + monotonic: false + aggregation: cumulative + value_type: int + input_type: string + attributes: [] + flink.task.record.count: + enabled: true + description: The number of records a task has. + unit: "{records}" + sum: + monotonic: true + aggregation: cumulative + value_type: int + input_type: string + attributes: [ record ] + flink.operator.record.count: + enabled: true + description: The number of records an operator has. + unit: "{records}" + sum: + monotonic: true + aggregation: cumulative + value_type: int + input_type: string + attributes: [ operator_name, record ] + flink.operator.watermark.output: + enabled: true + description: The last watermark this operator has emitted. 
+ unit: ms + sum: + monotonic: false + aggregation: cumulative + value_type: int + input_type: string + attributes: [ operator_name ] diff --git a/receiver/flinkmetricsreceiver/process.go b/receiver/flinkmetricsreceiver/process.go new file mode 100644 index 0000000000000..543766aa19b31 --- /dev/null +++ b/receiver/flinkmetricsreceiver/process.go @@ -0,0 +1,202 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package flinkmetricsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver" + +import ( + "strings" + + "go.opentelemetry.io/collector/pdata/pcommon" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver/internal/metadata" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver/internal/models" +) + +func (s *flinkmetricsScraper) processJobmanagerMetrics(now pcommon.Timestamp, jobmanagerMetrics *models.JobmanagerMetrics) { + for _, metric := range jobmanagerMetrics.Metrics { + switch metric.ID { + case "Status.JVM.CPU.Load": + _ = s.mb.RecordFlinkJvmCPULoadDataPoint(now, metric.Value) + case "Status.JVM.GarbageCollector.PS_MarkSweep.Time": + _ = s.mb.RecordFlinkJvmGcCollectionsTimeDataPoint(now, metric.Value, metadata.AttributeGarbageCollectorNamePSMarkSweep) + case "Status.JVM.GarbageCollector.PS_Scavenge.Time": + _ = s.mb.RecordFlinkJvmGcCollectionsTimeDataPoint(now, metric.Value, metadata.AttributeGarbageCollectorNamePSScavenge) + case "Status.JVM.GarbageCollector.PS_MarkSweep.Count": + _ = s.mb.RecordFlinkJvmGcCollectionsCountDataPoint(now, metric.Value, metadata.AttributeGarbageCollectorNamePSMarkSweep) + case "Status.JVM.GarbageCollector.PS_Scavenge.Count": + _ = s.mb.RecordFlinkJvmGcCollectionsCountDataPoint(now, metric.Value, metadata.AttributeGarbageCollectorNamePSScavenge) + case "Status.Flink.Memory.Managed.Used": + _ = s.mb.RecordFlinkMemoryManagedUsedDataPoint(now, metric.Value) + case "Status.Flink.Memory.Managed.Total": + _ = s.mb.RecordFlinkMemoryManagedTotalDataPoint(now, metric.Value) + case "Status.JVM.Memory.Mapped.TotalCapacity": + _ = s.mb.RecordFlinkJvmMemoryMappedTotalCapacityDataPoint(now, metric.Value) + case "Status.JVM.Memory.Mapped.MemoryUsed": + _ = s.mb.RecordFlinkJvmMemoryMappedUsedDataPoint(now, metric.Value) + case "Status.JVM.CPU.Time": + _ = s.mb.RecordFlinkJvmCPUTimeDataPoint(now, metric.Value) + case "Status.JVM.Threads.Count": + _ = s.mb.RecordFlinkJvmThreadsCountDataPoint(now, metric.Value) + case "Status.JVM.Memory.Heap.Committed": + _ = s.mb.RecordFlinkJvmMemoryHeapCommittedDataPoint(now, metric.Value) + case "Status.JVM.Memory.Metaspace.Committed": + _ = s.mb.RecordFlinkJvmMemoryMetaspaceCommittedDataPoint(now, metric.Value) + case "Status.JVM.Memory.NonHeap.Max": + _ = s.mb.RecordFlinkJvmMemoryNonheapMaxDataPoint(now, metric.Value) + case "Status.JVM.Memory.NonHeap.Committed": + _ = s.mb.RecordFlinkJvmMemoryNonheapCommittedDataPoint(now, metric.Value) + case 
"Status.JVM.Memory.NonHeap.Used": + _ = s.mb.RecordFlinkJvmMemoryNonheapUsedDataPoint(now, metric.Value) + case "Status.JVM.Memory.Metaspace.Max": + _ = s.mb.RecordFlinkJvmMemoryMetaspaceMaxDataPoint(now, metric.Value) + case "Status.JVM.Memory.Direct.MemoryUsed": + _ = s.mb.RecordFlinkJvmMemoryDirectUsedDataPoint(now, metric.Value) + case "Status.JVM.Memory.Direct.TotalCapacity": + _ = s.mb.RecordFlinkJvmMemoryDirectTotalCapacityDataPoint(now, metric.Value) + case "Status.JVM.ClassLoader.ClassesLoaded": + _ = s.mb.RecordFlinkJvmClassLoaderClassesLoadedDataPoint(now, metric.Value) + case "Status.JVM.Memory.Metaspace.Used": + _ = s.mb.RecordFlinkJvmMemoryMetaspaceUsedDataPoint(now, metric.Value) + case "Status.JVM.Memory.Heap.Max": + _ = s.mb.RecordFlinkJvmMemoryHeapMaxDataPoint(now, metric.Value) + case "Status.JVM.Memory.Heap.Used": + _ = s.mb.RecordFlinkJvmMemoryHeapUsedDataPoint(now, metric.Value) + } + } + s.mb.EmitForResource( + metadata.WithHostName(jobmanagerMetrics.Host), + metadata.WithFlinkResourceType("jobmanager"), + ) +} + +func (s *flinkmetricsScraper) processTaskmanagerMetrics(now pcommon.Timestamp, taskmanagerMetricInstances []*models.TaskmanagerMetrics) { + for _, taskmanagerMetrics := range taskmanagerMetricInstances { + for _, metric := range taskmanagerMetrics.Metrics { + switch metric.ID { + case "Status.JVM.GarbageCollector.G1_Young_Generation.Count": + _ = s.mb.RecordFlinkJvmGcCollectionsCountDataPoint(now, metric.Value, metadata.AttributeGarbageCollectorNameG1YoungGeneration) + case "Status.JVM.GarbageCollector.G1_Old_Generation.Count": + _ = s.mb.RecordFlinkJvmGcCollectionsCountDataPoint(now, metric.Value, metadata.AttributeGarbageCollectorNameG1OldGeneration) + case "Status.JVM.GarbageCollector.G1_Old_Generation.Time": + _ = s.mb.RecordFlinkJvmGcCollectionsTimeDataPoint(now, metric.Value, metadata.AttributeGarbageCollectorNameG1OldGeneration) + case "Status.JVM.GarbageCollector.G1_Young_Generation.Time": + _ = s.mb.RecordFlinkJvmGcCollectionsTimeDataPoint(now, metric.Value, metadata.AttributeGarbageCollectorNameG1YoungGeneration) + case "Status.JVM.CPU.Load": + _ = s.mb.RecordFlinkJvmCPULoadDataPoint(now, metric.Value) + case "Status.Flink.Memory.Managed.Used": + _ = s.mb.RecordFlinkMemoryManagedUsedDataPoint(now, metric.Value) + case "Status.Flink.Memory.Managed.Total": + _ = s.mb.RecordFlinkMemoryManagedTotalDataPoint(now, metric.Value) + case "Status.JVM.Memory.Mapped.TotalCapacity": + _ = s.mb.RecordFlinkJvmMemoryMappedTotalCapacityDataPoint(now, metric.Value) + case "Status.JVM.Memory.Mapped.MemoryUsed": + _ = s.mb.RecordFlinkJvmMemoryMappedUsedDataPoint(now, metric.Value) + case "Status.JVM.CPU.Time": + _ = s.mb.RecordFlinkJvmCPUTimeDataPoint(now, metric.Value) + case "Status.JVM.Threads.Count": + _ = s.mb.RecordFlinkJvmThreadsCountDataPoint(now, metric.Value) + case "Status.JVM.Memory.Heap.Committed": + _ = s.mb.RecordFlinkJvmMemoryHeapCommittedDataPoint(now, metric.Value) + case "Status.JVM.Memory.Metaspace.Committed": + _ = s.mb.RecordFlinkJvmMemoryMetaspaceCommittedDataPoint(now, metric.Value) + case "Status.JVM.Memory.NonHeap.Max": + _ = s.mb.RecordFlinkJvmMemoryNonheapMaxDataPoint(now, metric.Value) + case "Status.JVM.Memory.NonHeap.Committed": + _ = s.mb.RecordFlinkJvmMemoryNonheapCommittedDataPoint(now, metric.Value) + case "Status.JVM.Memory.NonHeap.Used": + _ = s.mb.RecordFlinkJvmMemoryNonheapUsedDataPoint(now, metric.Value) + case "Status.JVM.Memory.Metaspace.Max": + _ = s.mb.RecordFlinkJvmMemoryMetaspaceMaxDataPoint(now, metric.Value) + case 
"Status.JVM.Memory.Direct.MemoryUsed": + _ = s.mb.RecordFlinkJvmMemoryDirectUsedDataPoint(now, metric.Value) + case "Status.JVM.Memory.Direct.TotalCapacity": + _ = s.mb.RecordFlinkJvmMemoryDirectTotalCapacityDataPoint(now, metric.Value) + case "Status.JVM.ClassLoader.ClassesLoaded": + _ = s.mb.RecordFlinkJvmClassLoaderClassesLoadedDataPoint(now, metric.Value) + case "Status.JVM.Memory.Metaspace.Used": + _ = s.mb.RecordFlinkJvmMemoryMetaspaceUsedDataPoint(now, metric.Value) + case "Status.JVM.Memory.Heap.Max": + _ = s.mb.RecordFlinkJvmMemoryHeapMaxDataPoint(now, metric.Value) + case "Status.JVM.Memory.Heap.Used": + _ = s.mb.RecordFlinkJvmMemoryHeapUsedDataPoint(now, metric.Value) + } + } + s.mb.EmitForResource( + metadata.WithHostName(taskmanagerMetrics.Host), + metadata.WithFlinkTaskmanagerID(taskmanagerMetrics.TaskmanagerID), + metadata.WithFlinkResourceType("taskmanager"), + ) + } +} + +func (s *flinkmetricsScraper) processJobsMetrics(now pcommon.Timestamp, jobsMetricsInstances []*models.JobMetrics) { + for _, jobsMetrics := range jobsMetricsInstances { + for _, metric := range jobsMetrics.Metrics { + switch metric.ID { + case "numRestarts": + _ = s.mb.RecordFlinkJobRestartCountDataPoint(now, metric.Value) + case "lastCheckpointSize": + _ = s.mb.RecordFlinkJobLastCheckpointSizeDataPoint(now, metric.Value) + case "lastCheckpointDuration": + _ = s.mb.RecordFlinkJobLastCheckpointTimeDataPoint(now, metric.Value) + case "numberOfInProgressCheckpoints": + _ = s.mb.RecordFlinkJobCheckpointInProgressDataPoint(now, metric.Value) + case "numberOfCompletedCheckpoints": + _ = s.mb.RecordFlinkJobCheckpointCountDataPoint(now, metric.Value, metadata.AttributeCheckpointCompleted) + case "numberOfFailedCheckpoints": + _ = s.mb.RecordFlinkJobCheckpointCountDataPoint(now, metric.Value, metadata.AttributeCheckpointFailed) + } + } + s.mb.EmitForResource( + metadata.WithHostName(jobsMetrics.Host), + metadata.WithFlinkJobName(jobsMetrics.JobName), + ) + } +} + +func (s *flinkmetricsScraper) processSubtaskMetrics(now pcommon.Timestamp, subtaskMetricsInstances []*models.SubtaskMetrics) { + for _, subtaskMetrics := range subtaskMetricsInstances { + for _, metric := range subtaskMetrics.Metrics { + switch { + // record task metrics + case metric.ID == "numRecordsIn": + _ = s.mb.RecordFlinkTaskRecordCountDataPoint(now, metric.Value, metadata.AttributeRecordIn) + case metric.ID == "numRecordsOut": + _ = s.mb.RecordFlinkTaskRecordCountDataPoint(now, metric.Value, metadata.AttributeRecordOut) + case metric.ID == "numLateRecordsDropped": + _ = s.mb.RecordFlinkTaskRecordCountDataPoint(now, metric.Value, metadata.AttributeRecordDropped) + // record operator metrics + case strings.Contains(metric.ID, ".numRecordsIn"): + operatorName := strings.Split(metric.ID, ".numRecordsIn") + _ = s.mb.RecordFlinkOperatorRecordCountDataPoint(now, metric.Value, operatorName[0], metadata.AttributeRecordIn) + case strings.Contains(metric.ID, ".numRecordsOut"): + operatorName := strings.Split(metric.ID, ".numRecordsOut") + _ = s.mb.RecordFlinkOperatorRecordCountDataPoint(now, metric.Value, operatorName[0], metadata.AttributeRecordOut) + case strings.Contains(metric.ID, ".numLateRecordsDropped"): + operatorName := strings.Split(metric.ID, ".numLateRecordsDropped") + _ = s.mb.RecordFlinkOperatorRecordCountDataPoint(now, metric.Value, operatorName[0], metadata.AttributeRecordDropped) + case strings.Contains(metric.ID, ".currentOutputWatermark"): + operatorName := strings.Split(metric.ID, ".currentOutputWatermark") + _ = 
s.mb.RecordFlinkOperatorWatermarkOutputDataPoint(now, metric.Value, operatorName[0]) + } + } + s.mb.EmitForResource( + metadata.WithHostName(subtaskMetrics.Host), + metadata.WithFlinkTaskmanagerID(subtaskMetrics.TaskmanagerID), + metadata.WithFlinkJobName(subtaskMetrics.JobName), + metadata.WithFlinkTaskName(subtaskMetrics.TaskName), + metadata.WithFlinkSubtaskIndex(subtaskMetrics.SubtaskIndex), + ) + } +} diff --git a/receiver/flinkmetricsreceiver/scraper.go b/receiver/flinkmetricsreceiver/scraper.go new file mode 100644 index 0000000000000..9acd305c3f9a5 --- /dev/null +++ b/receiver/flinkmetricsreceiver/scraper.go @@ -0,0 +1,102 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package flinkmetricsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver" + +import ( + "context" + "errors" + "fmt" + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/receiver/scrapererror" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver/internal/metadata" +) + +var ( + errClientNotInit = errors.New("client not initialized") + jobmanagerFailedFetch = "Failed to fetch jobmanager metrics" + taskmanagerFailedFetch = "Failed to fetch taskmanager metrics" + jobsFailedFetch = "Failed to fetch jobs metrics" + subtasksFailedFetch = "Failed to fetch subtasks metrics" +) + +type flinkmetricsScraper struct { + client client + cfg *Config + settings component.TelemetrySettings + mb *metadata.MetricsBuilder +} + +func newflinkScraper(config *Config, settings component.ReceiverCreateSettings) *flinkmetricsScraper { + return &flinkmetricsScraper{ + settings: settings.TelemetrySettings, + cfg: config, + mb: metadata.NewMetricsBuilder(config.Metrics, settings.BuildInfo), + } +} + +func (s *flinkmetricsScraper) start(_ context.Context, host component.Host) error { + httpClient, err := newClient(s.cfg, host, s.settings, s.settings.Logger) + if err != nil { + return fmt.Errorf("create client: %w", err) + } + s.client = httpClient + return nil +} + +func (s *flinkmetricsScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { + // Validate we don't attempt to scrape without initializing the client + if s.client == nil { + return pmetric.NewMetrics(), errClientNotInit + } + + now := pcommon.NewTimestampFromTime(time.Now()) + var scraperErrors scrapererror.ScrapeErrors + + jobmanagerMetrics, err := s.client.GetJobmanagerMetrics(ctx) + if err != nil { + s.settings.Logger.Error(jobmanagerFailedFetch, zap.Error(err)) + scraperErrors.AddPartial(1, fmt.Errorf("%s %w", jobmanagerFailedFetch, err)) + } + + taskmanagersMetrics, err := s.client.GetTaskmanagersMetrics(ctx) + if err != nil { + s.settings.Logger.Error(taskmanagerFailedFetch, zap.Error(err)) + scraperErrors.AddPartial(1, fmt.Errorf("%s %w", taskmanagerFailedFetch, err)) + } + 
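+
+	// Endpoint failures are deliberately non-fatal: each one is logged and
+	// recorded as a partial scrape error so the remaining endpoints can still
+	// contribute metrics to the same scrape.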
+ jobsMetrics, err := s.client.GetJobsMetrics(ctx) + if err != nil { + s.settings.Logger.Error(jobsFailedFetch, zap.Error(err)) + scraperErrors.AddPartial(1, fmt.Errorf("%s %w", jobsFailedFetch, err)) + } + subtasksMetrics, err := s.client.GetSubtasksMetrics(ctx) + if err != nil { + s.settings.Logger.Error(subtasksFailedFetch, zap.Error(err)) + scraperErrors.AddPartial(1, fmt.Errorf("%s %w", subtasksFailedFetch, err)) + } + + s.processJobmanagerMetrics(now, jobmanagerMetrics) + s.processTaskmanagerMetrics(now, taskmanagersMetrics) + s.processJobsMetrics(now, jobsMetrics) + s.processSubtaskMetrics(now, subtasksMetrics) + + return s.mb.Emit(), scraperErrors.Combine() +} diff --git a/receiver/flinkmetricsreceiver/scraper_test.go b/receiver/flinkmetricsreceiver/scraper_test.go new file mode 100644 index 0000000000000..e1509b899afac --- /dev/null +++ b/receiver/flinkmetricsreceiver/scraper_test.go @@ -0,0 +1,275 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// nolint:errcheck +package flinkmetricsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver" + +import ( + "context" + "encoding/json" + "errors" + "path/filepath" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configtls" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest/golden" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver/internal/mocks" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver/internal/models" +) + +var ( + mockJobmanagerMetrics = "mock_jobmanager_metrics.json" + mockTaskmanagerMetrics = "mock_taskmanager_metrics.json" + mockJobsMetrics = "mock_jobs_metrics.json" + mockSubtaskMetrics = "mock_subtask_metrics.json" + + mockResponses = "mockresponses" +) + +func TestScraperStart(t *testing.T) { + testcases := []struct { + desc string + scraper *flinkmetricsScraper + expectError bool + }{ + { + desc: "Bad Config", + scraper: &flinkmetricsScraper{ + cfg: &Config{ + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: defaultEndpoint, + TLSSetting: configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: "/non/existent", + }, + }, + }, + }, + settings: componenttest.NewNopTelemetrySettings(), + }, + expectError: true, + }, + { + desc: "Valid Config", + scraper: &flinkmetricsScraper{ + cfg: &Config{ + HTTPClientSettings: confighttp.HTTPClientSettings{ + TLSSetting: configtls.TLSClientSetting{}, + Endpoint: defaultEndpoint, + }, + }, + settings: componenttest.NewNopTelemetrySettings(), + }, + expectError: false, + }, + } + + for _, tc := range testcases { + t.Run(tc.desc, func(t *testing.T) { + err := 
tc.scraper.start(context.Background(), componenttest.NewNopHost()) + if tc.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestScraperScrape(t *testing.T) { + // use helper function from client tests + jobmanagerMetricValuesData := loadAPIResponseData(t, mockResponses, mockJobmanagerMetrics) + taskmanagerMetricValuesData := loadAPIResponseData(t, mockResponses, mockTaskmanagerMetrics) + jobsMetricValuesData := loadAPIResponseData(t, mockResponses, mockJobsMetrics) + subtaskMetricValuesData := loadAPIResponseData(t, mockResponses, mockSubtaskMetrics) + + // unmarshal api responses into a metrics response + var jobmanagerMetricsResponse *models.MetricsResponse + var taskmanagerMetricsResponse *models.MetricsResponse + var jobsMetricsResponse *models.MetricsResponse + var subtaskMetricsResponse *models.MetricsResponse + err := json.Unmarshal(jobmanagerMetricValuesData, &jobmanagerMetricsResponse) + require.NoError(t, err) + err = json.Unmarshal(taskmanagerMetricValuesData, &taskmanagerMetricsResponse) + require.NoError(t, err) + err = json.Unmarshal(jobsMetricValuesData, &jobsMetricsResponse) + require.NoError(t, err) + err = json.Unmarshal(subtaskMetricValuesData, &subtaskMetricsResponse) + require.NoError(t, err) + + // populate scope metrics with attributes using the scope metrics response + jobmanagerMetrics := models.JobmanagerMetrics{ + Host: "mock-host", + Metrics: *jobmanagerMetricsResponse, + } + + taskmanagerMetricsInstances := []*models.TaskmanagerMetrics{} + taskmanagerMetricsInstances = append(taskmanagerMetricsInstances, &models.TaskmanagerMetrics{ + Host: "mock-host", + TaskmanagerID: "mock-taskmanager-id", + Metrics: *taskmanagerMetricsResponse, + }) + taskmanagerMetricsInstances = append(taskmanagerMetricsInstances, &models.TaskmanagerMetrics{ + Host: "mock-host2", + TaskmanagerID: "mock-taskmanager-id2", + Metrics: *taskmanagerMetricsResponse, + }) + + jobsMetricsInstances := []*models.JobMetrics{} + jobsMetricsInstances = append(jobsMetricsInstances, &models.JobMetrics{ + Host: "mock-host", + JobName: "mock-job-name", + Metrics: *jobsMetricsResponse, + }) + jobsMetricsInstances = append(jobsMetricsInstances, &models.JobMetrics{ + Host: "mock-host2", + JobName: "mock-job-name2", + Metrics: *jobsMetricsResponse, + }) + + subtaskMetricsInstances := []*models.SubtaskMetrics{} + subtaskMetricsInstances = append(subtaskMetricsInstances, &models.SubtaskMetrics{ + Host: "mock-host", + TaskmanagerID: "mock-taskmanager-id", + JobName: "mock-job-name", + TaskName: "mock-task-name", + SubtaskIndex: "mock-subtask-index", + Metrics: *subtaskMetricsResponse, + }) + + testCases := []struct { + desc string + setupMockClient func(t *testing.T) client + expectedMetricFile string + expectedErr error + }{ + { + desc: "Nil client", + setupMockClient: func(t *testing.T) client { + return nil + }, + expectedMetricFile: filepath.Join("testdata", "expected_metrics", "no_metrics.json"), + expectedErr: errClientNotInit, + }, + { + desc: "API Call Failure on Jobmanagers", + setupMockClient: func(t *testing.T) client { + mockClient := mocks.MockClient{} + mockClient.On("GetJobmanagerMetrics", mock.Anything).Return(&models.JobmanagerMetrics{}, errors.New("some api error")) + mockClient.On("GetTaskmanagersMetrics", mock.Anything).Return(nil, nil) + mockClient.On("GetJobsMetrics", mock.Anything).Return(nil, nil) + mockClient.On("GetSubtasksMetrics", mock.Anything).Return(nil, nil) + return &mockClient + }, + expectedMetricFile: filepath.Join("testdata", 
"expected_metrics", "no_metrics.json"), + expectedErr: errors.New(jobmanagerFailedFetch + " some api error"), + }, + { + desc: "API Call Failure on Taskmanagers", + setupMockClient: func(t *testing.T) client { + mockClient := mocks.MockClient{} + mockClient.On("GetJobmanagerMetrics", mock.Anything).Return(&jobmanagerMetrics, nil) + mockClient.On("GetTaskmanagersMetrics", mock.Anything).Return(nil, errors.New("some api error")) + mockClient.On("GetJobsMetrics", mock.Anything).Return(nil, nil) + mockClient.On("GetSubtasksMetrics", mock.Anything).Return(nil, nil) + return &mockClient + }, + expectedMetricFile: filepath.Join("testdata", "expected_metrics", "partial_metrics_no_taskmanagers.json"), + expectedErr: errors.New(taskmanagerFailedFetch + " some api error"), + }, + { + desc: "API Call Failure on Jobs", + setupMockClient: func(t *testing.T) client { + mockClient := mocks.MockClient{} + mockClient.On("GetJobmanagerMetrics", mock.Anything).Return(&jobmanagerMetrics, nil) + mockClient.On("GetTaskmanagersMetrics", mock.Anything).Return(taskmanagerMetricsInstances, nil) + mockClient.On("GetJobsMetrics", mock.Anything).Return(nil, errors.New("some api error")) + mockClient.On("GetSubtasksMetrics", mock.Anything).Return(nil, nil) + return &mockClient + }, + expectedMetricFile: filepath.Join("testdata", "expected_metrics", "partial_metrics_no_jobs.json"), + expectedErr: errors.New(jobsFailedFetch + " some api error"), + }, + { + desc: "API Call Failure on Subtasks", + setupMockClient: func(t *testing.T) client { + mockClient := mocks.MockClient{} + mockClient.On("GetJobmanagerMetrics", mock.Anything).Return(&jobmanagerMetrics, nil) + mockClient.On("GetTaskmanagersMetrics", mock.Anything).Return(taskmanagerMetricsInstances, nil) + mockClient.On("GetJobsMetrics", mock.Anything).Return(jobsMetricsInstances, nil) + mockClient.On("GetSubtasksMetrics", mock.Anything).Return(nil, errors.New("some api error")) + return &mockClient + }, + expectedMetricFile: filepath.Join("testdata", "expected_metrics", "partial_metrics_no_subtasks.json"), + expectedErr: errors.New(subtasksFailedFetch + " some api error"), + }, + { + desc: "Successful Collection no jobs running", + setupMockClient: func(t *testing.T) client { + mockClient := mocks.MockClient{} + jobsEmptyInstances := []*models.JobMetrics{} + subtaskEmptyInstances := []*models.SubtaskMetrics{} + require.NoError(t, err) + mockClient.On("GetJobmanagerMetrics", mock.Anything).Return(&jobmanagerMetrics, nil) + mockClient.On("GetTaskmanagersMetrics", mock.Anything).Return(taskmanagerMetricsInstances, nil) + mockClient.On("GetJobsMetrics", mock.Anything).Return(jobsEmptyInstances, nil) + mockClient.On("GetSubtasksMetrics", mock.Anything).Return(subtaskEmptyInstances, nil) + return &mockClient + }, + expectedMetricFile: filepath.Join("testdata", "expected_metrics", "metrics_no_jobs_golden.json"), + expectedErr: nil, + }, + { + desc: "Successful Collection", + setupMockClient: func(t *testing.T) client { + mockClient := mocks.MockClient{} + + // mock client calls + mockClient.On("GetJobmanagerMetrics", mock.Anything).Return(&jobmanagerMetrics, nil) + mockClient.On("GetTaskmanagersMetrics", mock.Anything).Return(taskmanagerMetricsInstances, nil) + mockClient.On("GetJobsMetrics", mock.Anything).Return(jobsMetricsInstances, nil) + mockClient.On("GetSubtasksMetrics", mock.Anything).Return(subtaskMetricsInstances, nil) + + return &mockClient + }, + expectedMetricFile: filepath.Join("testdata", "expected_metrics", "metrics_golden.json"), + expectedErr: nil, + }, + } + 
+	for _, tc := range testCases {
+		t.Run(tc.desc, func(t *testing.T) {
+			scraper := newflinkScraper(createDefaultConfig().(*Config), componenttest.NewNopReceiverCreateSettings())
+			scraper.client = tc.setupMockClient(t)
+			actualMetrics, err := scraper.scrape(context.Background())
+
+			if tc.expectedErr == nil {
+				require.NoError(t, err)
+			} else {
+				require.EqualError(t, err, tc.expectedErr.Error())
+			}
+
+			expectedMetrics, err := golden.ReadMetrics(tc.expectedMetricFile)
+			require.NoError(t, err)
+
+			require.NoError(t, scrapertest.CompareMetrics(expectedMetrics, actualMetrics))
+		})
+	}
+}
diff --git a/receiver/flinkmetricsreceiver/testdata/apiresponses/jobmanager_metric_names.json b/receiver/flinkmetricsreceiver/testdata/apiresponses/jobmanager_metric_names.json
new file mode 100644
index 0000000000000..382c37d52e002
--- /dev/null
+++ b/receiver/flinkmetricsreceiver/testdata/apiresponses/jobmanager_metric_names.json
@@ -0,0 +1,86 @@
+[
+  {
+    "id": "Status.JVM.GarbageCollector.PS_MarkSweep.Time"
+  },
+  {
+    "id": "Status.JVM.Memory.Mapped.TotalCapacity"
+  },
+  {
+    "id": "taskSlotsAvailable"
+  },
+  {
+    "id": "taskSlotsTotal"
+  },
+  {
+    "id": "Status.JVM.Memory.Mapped.MemoryUsed"
+  },
+  {
+    "id": "Status.JVM.CPU.Time"
+  },
+  {
+    "id": "Status.JVM.Threads.Count"
+  },
+  {
+    "id": "Status.JVM.Memory.Heap.Committed"
+  },
+  {
+    "id": "Status.JVM.Memory.Metaspace.Committed"
+  },
+  {
+    "id": "Status.JVM.GarbageCollector.PS_Scavenge.Time"
+  },
+  {
+    "id": "Status.JVM.GarbageCollector.PS_MarkSweep.Count"
+  },
+  {
+    "id": "Status.JVM.Memory.Direct.Count"
+  },
+  {
+    "id": "Status.JVM.GarbageCollector.PS_Scavenge.Count"
+  },
+  {
+    "id": "Status.JVM.Memory.NonHeap.Max"
+  },
+  {
+    "id": "numRegisteredTaskManagers"
+  },
+  {
+    "id": "Status.JVM.Memory.NonHeap.Committed"
+  },
+  {
+    "id": "Status.JVM.Memory.NonHeap.Used"
+  },
+  {
+    "id": "Status.JVM.Memory.Metaspace.Max"
+  },
+  {
+    "id": "Status.JVM.Memory.Direct.MemoryUsed"
+  },
+  {
+    "id": "Status.JVM.Memory.Direct.TotalCapacity"
+  },
+  {
+    "id": "numRunningJobs"
+  },
+  {
+    "id": "Status.JVM.ClassLoader.ClassesLoaded"
+  },
+  {
+    "id": "Status.JVM.Memory.Mapped.Count"
+  },
+  {
+    "id": "Status.JVM.Memory.Metaspace.Used"
+  },
+  {
+    "id": "Status.JVM.CPU.Load"
+  },
+  {
+    "id": "Status.JVM.Memory.Heap.Max"
+  },
+  {
+    "id": "Status.JVM.Memory.Heap.Used"
+  },
+  {
+    "id": "Status.JVM.ClassLoader.ClassesUnloaded"
+  }
+]
diff --git a/receiver/flinkmetricsreceiver/testdata/apiresponses/jobmanager_metric_values.json b/receiver/flinkmetricsreceiver/testdata/apiresponses/jobmanager_metric_values.json
new file mode 100644
index 0000000000000..595af6205b337
--- /dev/null
+++ b/receiver/flinkmetricsreceiver/testdata/apiresponses/jobmanager_metric_values.json
@@ -0,0 +1,114 @@
+[
+  {
+    "id": "Status.JVM.GarbageCollector.PS_MarkSweep.Time",
+    "value": "141"
+  },
+  {
+    "id": "Status.JVM.Memory.Mapped.TotalCapacity",
+    "value": "0"
+  },
+  {
+    "id": "taskSlotsAvailable",
+    "value": "7"
+  },
+  {
+    "id": "taskSlotsTotal",
+    "value": "8"
+  },
+  {
+    "id": "Status.JVM.Memory.Mapped.MemoryUsed",
+    "value": "0"
+  },
+  {
+    "id": "Status.JVM.CPU.Time",
+    "value": "574220000000"
+  },
+  {
+    "id": "Status.JVM.Threads.Count",
+    "value": "94"
+  },
+  {
+    "id": "Status.JVM.Memory.Heap.Committed",
+    "value": "1047527424"
+  },
+  {
+    "id": "Status.JVM.Memory.Metaspace.Committed",
+    "value": "66322432"
+  },
+  {
+    "id": "Status.JVM.GarbageCollector.PS_Scavenge.Time",
+    "value": "293"
+  },
+  {
+    "id": "Status.JVM.GarbageCollector.PS_MarkSweep.Count",
+    "value": "3"
+  },
+  {
+    "id": "Status.JVM.Memory.Direct.Count",
+
"value": "20" + }, + { + "id": "Status.JVM.GarbageCollector.PS_Scavenge.Count", + "value": "18" + }, + { + "id": "Status.JVM.Memory.NonHeap.Max", + "value": "780140544" + }, + { + "id": "numRegisteredTaskManagers", + "value": "1" + }, + { + "id": "Status.JVM.Memory.NonHeap.Committed", + "value": "100794368" + }, + { + "id": "Status.JVM.Memory.NonHeap.Used", + "value": "94512880" + }, + { + "id": "Status.JVM.Memory.Metaspace.Max", + "value": "268435456" + }, + { + "id": "Status.JVM.Memory.Direct.MemoryUsed", + "value": "625945" + }, + { + "id": "Status.JVM.Memory.Direct.TotalCapacity", + "value": "625944" + }, + { + "id": "numRunningJobs", + "value": "1" + }, + { + "id": "Status.JVM.ClassLoader.ClassesLoaded", + "value": "10371" + }, + { + "id": "Status.JVM.Memory.Mapped.Count", + "value": "0" + }, + { + "id": "Status.JVM.Memory.Metaspace.Used", + "value": "61335024" + }, + { + "id": "Status.JVM.CPU.Load", + "value": "0.0056257957540046775" + }, + { + "id": "Status.JVM.Memory.Heap.Max", + "value": "1047527424" + }, + { + "id": "Status.JVM.Memory.Heap.Used", + "value": "208824352" + }, + { + "id": "Status.JVM.ClassLoader.ClassesUnloaded", + "value": "6" + } +] diff --git a/receiver/flinkmetricsreceiver/testdata/apiresponses/jobs_ids.json b/receiver/flinkmetricsreceiver/testdata/apiresponses/jobs_ids.json new file mode 100644 index 0000000000000..b75fbe22eaca7 --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/apiresponses/jobs_ids.json @@ -0,0 +1,8 @@ +{ + "jobs": [ + { + "id": "54a5c6e527e00e1bb861272a39fe13e4", + "status": "RUNNING" + } + ] +} diff --git a/receiver/flinkmetricsreceiver/testdata/apiresponses/jobs_metric_names.json b/receiver/flinkmetricsreceiver/testdata/apiresponses/jobs_metric_names.json new file mode 100644 index 0000000000000..26d73504a5df8 --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/apiresponses/jobs_metric_names.json @@ -0,0 +1,47 @@ +[ + { + "id": "numberOfFailedCheckpoints" + }, + { + "id": "lastCheckpointSize" + }, + { + "id": "lastCheckpointExternalPath" + }, + { + "id": "totalNumberOfCheckpoints" + }, + { + "id": "lastCheckpointRestoreTimestamp" + }, + { + "id": "uptime" + }, + { + "id": "restartingTime" + }, + { + "id": "numberOfInProgressCheckpoints" + }, + { + "id": "downtime" + }, + { + "id": "lastCheckpointProcessedData" + }, + { + "id": "numberOfCompletedCheckpoints" + }, + { + "id": "numRestarts" + }, + { + "id": "fullRestarts" + }, + { + "id": "lastCheckpointDuration" + }, + { + "id": "lastCheckpointPersistedData" + } +] diff --git a/receiver/flinkmetricsreceiver/testdata/apiresponses/jobs_metric_values.json b/receiver/flinkmetricsreceiver/testdata/apiresponses/jobs_metric_values.json new file mode 100644 index 0000000000000..101c21b29d7e0 --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/apiresponses/jobs_metric_values.json @@ -0,0 +1,62 @@ +[ + { + "id": "numberOfFailedCheckpoints", + "value": "0" + }, + { + "id": "lastCheckpointSize", + "value": "15088" + }, + { + "id": "lastCheckpointExternalPath", + "value": "" + }, + { + "id": "totalNumberOfCheckpoints", + "value": "6641" + }, + { + "id": "lastCheckpointRestoreTimestamp", + "value": "1651445646439" + }, + { + "id": "uptime", + "value": "16863615" + }, + { + "id": "restartingTime", + "value": "1024" + }, + { + "id": "numberOfInProgressCheckpoints", + "value": "0" + }, + { + "id": "downtime", + "value": "0" + }, + { + "id": "lastCheckpointProcessedData", + "value": "0" + }, + { + "id": "numberOfCompletedCheckpoints", + "value": "6641" + }, + { + "id": "numRestarts", + 
"value": "1" + }, + { + "id": "fullRestarts", + "value": "1" + }, + { + "id": "lastCheckpointDuration", + "value": "7" + }, + { + "id": "lastCheckpointPersistedData", + "value": "0" + } +] diff --git a/receiver/flinkmetricsreceiver/testdata/apiresponses/jobs_overview.json b/receiver/flinkmetricsreceiver/testdata/apiresponses/jobs_overview.json new file mode 100644 index 0000000000000..3aead13d020c4 --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/apiresponses/jobs_overview.json @@ -0,0 +1,26 @@ +{ + "jobs": [ + { + "jid": "54a5c6e527e00e1bb861272a39fe13e4", + "name": "State machine job", + "state": "RUNNING", + "start-time": 1651442575208, + "end-time": -1, + "duration": 19888604, + "last-modification": 1651445646545, + "tasks": { + "total": 2, + "created": 0, + "scheduled": 0, + "deploying": 0, + "running": 2, + "finished": 0, + "canceling": 0, + "canceled": 0, + "failed": 0, + "reconciling": 0, + "initializing": 0 + } + } + ] +} diff --git a/receiver/flinkmetricsreceiver/testdata/apiresponses/jobs_with_id.json b/receiver/flinkmetricsreceiver/testdata/apiresponses/jobs_with_id.json new file mode 100644 index 0000000000000..0c419f7660031 --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/apiresponses/jobs_with_id.json @@ -0,0 +1,133 @@ +{ + "jid": "54a5c6e527e00e1bb861272a39fe13e4", + "name": "State machine job", + "isStoppable": false, + "state": "RUNNING", + "start-time": 1651442575208, + "end-time": -1, + "duration": 19918001, + "maxParallelism": -1, + "now": 1651462493209, + "timestamps": { + "CANCELED": 0, + "RESTARTING": 1651445645389, + "FAILED": 0, + "SUSPENDED": 0, + "CANCELLING": 0, + "RECONCILING": 0, + "RUNNING": 1651445646413, + "FINISHED": 0, + "FAILING": 0, + "INITIALIZING": 1651442575208, + "CREATED": 1651442575302 + }, + "vertices": [ + { + "id": "bc764cd8ddf7a0cff126f51c16239658", + "name": "Source: Custom Source", + "maxParallelism": 128, + "parallelism": 1, + "status": "RUNNING", + "start-time": 1651445646461, + "end-time": -1, + "duration": 16846748, + "tasks": { + "RUNNING": 1, + "FINISHED": 0, + "CANCELED": 0, + "SCHEDULED": 0, + "DEPLOYING": 0, + "INITIALIZING": 0, + "RECONCILING": 0, + "FAILED": 0, + "CREATED": 0, + "CANCELING": 0 + }, + "metrics": { + "read-bytes": 0, + "read-bytes-complete": true, + "write-bytes": 119328736, + "write-bytes-complete": true, + "read-records": 0, + "read-records-complete": true, + "write-records": 8562805, + "write-records-complete": true + } + }, + { + "id": "20ba6b65f97481d5570070de90e4e791", + "name": "Flat Map -> Sink: Print to Std. 
Out", + "maxParallelism": 128, + "parallelism": 1, + "status": "RUNNING", + "start-time": 1651445646463, + "end-time": -1, + "duration": 16846746, + "tasks": { + "RUNNING": 1, + "FINISHED": 0, + "CANCELED": 0, + "SCHEDULED": 0, + "DEPLOYING": 0, + "INITIALIZING": 0, + "RECONCILING": 0, + "FAILED": 0, + "CREATED": 0, + "CANCELING": 0 + }, + "metrics": { + "read-bytes": 119564908, + "read-bytes-complete": true, + "write-bytes": 0, + "write-bytes-complete": true, + "read-records": 8562798, + "read-records-complete": true, + "write-records": 0, + "write-records-complete": true + } + } + ], + "status-counts": { + "RUNNING": 2, + "FINISHED": 0, + "CANCELED": 0, + "SCHEDULED": 0, + "DEPLOYING": 0, + "INITIALIZING": 0, + "RECONCILING": 0, + "FAILED": 0, + "CREATED": 0, + "CANCELING": 0 + }, + "plan": { + "jid": "54a5c6e527e00e1bb861272a39fe13e4", + "name": "State machine job", + "type": "STREAMING", + "nodes": [ + { + "id": "20ba6b65f97481d5570070de90e4e791", + "parallelism": 1, + "operator": "", + "operator_strategy": "", + "description": "Flat Map -> Sink: Print to Std. Out", + "inputs": [ + { + "num": 0, + "id": "bc764cd8ddf7a0cff126f51c16239658", + "ship_strategy": "HASH", + "exchange": "pipelined_bounded" + } + ], + "optimizer_properties": {} + }, + { + "id": "bc764cd8ddf7a0cff126f51c16239658", + "parallelism": 1, + "operator": "", + "operator_strategy": "", + "description": "Source: Custom Source", + "optimizer_properties": {} + } + ] + } +} diff --git a/receiver/flinkmetricsreceiver/testdata/apiresponses/subtask_metric_names.json b/receiver/flinkmetricsreceiver/testdata/apiresponses/subtask_metric_names.json new file mode 100644 index 0000000000000..adbeba76de206 --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/apiresponses/subtask_metric_names.json @@ -0,0 +1,146 @@ +[ + { + "id": "Shuffle.Netty.Output.Buffers.outPoolUsage" + }, + { + "id": "checkpointStartDelayNanos" + }, + { + "id": "numBytesInLocal" + }, + { + "id": "numBytesInRemotePerSecond" + }, + { + "id": "Shuffle.Netty.Input.numBytesInRemotePerSecond" + }, + { + "id": "Source__Custom_Source.numRecordsInPerSecond" + }, + { + "id": "numBytesOut" + }, + { + "id": "numBytesIn" + }, + { + "id": "numBuffersOut" + }, + { + "id": "Shuffle.Netty.Input.numBuffersInLocal" + }, + { + "id": "numBuffersInRemotePerSecond" + }, + { + "id": "numBytesOutPerSecond" + }, + { + "id": "buffers.outputQueueLength" + }, + { + "id": "numBuffersOutPerSecond" + }, + { + "id": "Shuffle.Netty.Input.Buffers.inputExclusiveBuffersUsage" + }, + { + "id": "isBackPressured" + }, + { + "id": "numBytesInLocalPerSecond" + }, + { + "id": "buffers.inPoolUsage" + }, + { + "id": "idleTimeMsPerSecond" + }, + { + "id": "Shuffle.Netty.Input.numBytesInLocalPerSecond" + }, + { + "id": "numBytesInRemote" + }, + { + "id": "Source__Custom_Source.numRecordsOut" + }, + { + "id": "Shuffle.Netty.Input.numBytesInLocal" + }, + { + "id": "Shuffle.Netty.Input.numBytesInRemote" + }, + { + "id": "busyTimeMsPerSecond" + }, + { + "id": "Shuffle.Netty.Output.Buffers.outputQueueLength" + }, + { + "id": "buffers.inputFloatingBuffersUsage" + }, + { + "id": "Shuffle.Netty.Input.Buffers.inPoolUsage" + }, + { + "id": "numBuffersInLocalPerSecond" + }, + { + "id": "numRecordsOut" + }, + { + "id": "numBuffersInLocal" + }, + { + "id": "Source__Custom_Source.currentOutputWatermark" + }, + { + "id": "numBuffersInRemote" + }, + { + "id": "buffers.inputQueueLength" + }, + { + "id": "Source__Custom_Source.numRecordsOutPerSecond" + }, + { + "id": "numRecordsIn" + }, + { + "id": 
"Shuffle.Netty.Input.numBuffersInRemote" + }, + { + "id": "numBytesInPerSecond" + }, + { + "id": "backPressuredTimeMsPerSecond" + }, + { + "id": "Shuffle.Netty.Input.Buffers.inputQueueLength" + }, + { + "id": "buffers.inputExclusiveBuffersUsage" + }, + { + "id": "Source__Custom_Source.numRecordsIn" + }, + { + "id": "Shuffle.Netty.Input.numBuffersInRemotePerSecond" + }, + { + "id": "numRecordsOutPerSecond" + }, + { + "id": "buffers.outPoolUsage" + }, + { + "id": "Shuffle.Netty.Input.numBuffersInLocalPerSecond" + }, + { + "id": "numRecordsInPerSecond" + }, + { + "id": "Shuffle.Netty.Input.Buffers.inputFloatingBuffersUsage" + } +] diff --git a/receiver/flinkmetricsreceiver/testdata/apiresponses/subtask_metric_values.json b/receiver/flinkmetricsreceiver/testdata/apiresponses/subtask_metric_values.json new file mode 100644 index 0000000000000..a7c6a04c1c054 --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/apiresponses/subtask_metric_values.json @@ -0,0 +1,194 @@ +[ + { + "id": "Shuffle.Netty.Output.Buffers.outPoolUsage", + "value": "0.1" + }, + { + "id": "checkpointStartDelayNanos", + "value": "3000000" + }, + { + "id": "numBytesInLocal", + "value": "0" + }, + { + "id": "numBytesInRemotePerSecond", + "value": "0.0" + }, + { + "id": "Shuffle.Netty.Input.numBytesInRemotePerSecond", + "value": "0.0" + }, + { + "id": "Source__Custom_Source.numRecordsInPerSecond", + "value": "0.0" + }, + { + "id": "numBytesOut", + "value": "121864410" + }, + { + "id": "numBytesIn", + "value": "0" + }, + { + "id": "numBuffersOut", + "value": "6119" + }, + { + "id": "Shuffle.Netty.Input.numBuffersInLocal", + "value": "0" + }, + { + "id": "numBuffersInRemotePerSecond", + "value": "0.0" + }, + { + "id": "numBytesOutPerSecond", + "value": "9608.3" + }, + { + "id": "buffers.outputQueueLength", + "value": "1" + }, + { + "id": "numBuffersOutPerSecond", + "value": "0.5" + }, + { + "id": "Shuffle.Netty.Input.Buffers.inputExclusiveBuffersUsage", + "value": "0.0" + }, + { + "id": "isBackPressured", + "value": "false" + }, + { + "id": "numBytesInLocalPerSecond", + "value": "0.0" + }, + { + "id": "buffers.inPoolUsage", + "value": "0.0" + }, + { + "id": "idleTimeMsPerSecond", + "value": "0" + }, + { + "id": "Shuffle.Netty.Input.numBytesInLocalPerSecond", + "value": "0.0" + }, + { + "id": "numBytesInRemote", + "value": "0" + }, + { + "id": "Source__Custom_Source.numRecordsOut", + "value": "8744577" + }, + { + "id": "Shuffle.Netty.Input.numBytesInLocal", + "value": "0" + }, + { + "id": "Shuffle.Netty.Input.numBytesInRemote", + "value": "0" + }, + { + "id": "busyTimeMsPerSecond", + "value": "NaN" + }, + { + "id": "Shuffle.Netty.Output.Buffers.outputQueueLength", + "value": "1" + }, + { + "id": "buffers.inputFloatingBuffersUsage", + "value": "0.0" + }, + { + "id": "Shuffle.Netty.Input.Buffers.inPoolUsage", + "value": "0.0" + }, + { + "id": "numBuffersInLocalPerSecond", + "value": "0.0" + }, + { + "id": "numRecordsOut", + "value": "8744577" + }, + { + "id": "numBuffersInLocal", + "value": "0" + }, + { + "id": "Source__Custom_Source.currentOutputWatermark", + "value": "-9223372036854775808" + }, + { + "id": "numBuffersInRemote", + "value": "0" + }, + { + "id": "buffers.inputQueueLength", + "value": "0" + }, + { + "id": "Source__Custom_Source.numRecordsOutPerSecond", + "value": "689.4833333333333" + }, + { + "id": "numRecordsIn", + "value": "0" + }, + { + "id": "Shuffle.Netty.Input.numBuffersInRemote", + "value": "0" + }, + { + "id": "numBytesInPerSecond", + "value": "0.0" + }, + { + "id": "backPressuredTimeMsPerSecond", + 
"value": "0" + }, + { + "id": "Shuffle.Netty.Input.Buffers.inputQueueLength", + "value": "0" + }, + { + "id": "buffers.inputExclusiveBuffersUsage", + "value": "0.0" + }, + { + "id": "Source__Custom_Source.numRecordsIn", + "value": "0" + }, + { + "id": "Shuffle.Netty.Input.numBuffersInRemotePerSecond", + "value": "0.0" + }, + { + "id": "numRecordsOutPerSecond", + "value": "689.4833333333333" + }, + { + "id": "buffers.outPoolUsage", + "value": "0.1" + }, + { + "id": "Shuffle.Netty.Input.numBuffersInLocalPerSecond", + "value": "0.0" + }, + { + "id": "numRecordsInPerSecond", + "value": "0.0" + }, + { + "id": "Shuffle.Netty.Input.Buffers.inputFloatingBuffersUsage", + "value": "0.0" + } +] diff --git a/receiver/flinkmetricsreceiver/testdata/apiresponses/taskmanager_ids.json b/receiver/flinkmetricsreceiver/testdata/apiresponses/taskmanager_ids.json new file mode 100644 index 0000000000000..feb689b43fdf6 --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/apiresponses/taskmanager_ids.json @@ -0,0 +1,47 @@ +{ + "taskmanagers": [ + { + "id": "172.26.0.3:34457-7b2520", + "path": "akka.tcp://flink@172.26.0.3:34457/user/rpc/taskmanager_0", + "dataPort": 44065, + "jmxPort": -1, + "timeSinceLastHeartbeat": 1651461830558, + "slotsNumber": 8, + "freeSlots": 7, + "totalResource": { + "cpuCores": 8.0, + "taskHeapMemory": 383, + "taskOffHeapMemory": 0, + "managedMemory": 512, + "networkMemory": 128, + "extendedResources": {} + }, + "freeResource": { + "cpuCores": 7.0, + "taskHeapMemory": 335, + "taskOffHeapMemory": 0, + "managedMemory": 448, + "networkMemory": 112, + "extendedResources": {} + }, + "hardware": { + "cpuCores": 8, + "physicalMemory": 10447507456, + "freeMemory": 536870912, + "managedMemory": 536870920 + }, + "memoryConfiguration": { + "frameworkHeap": 134217728, + "taskHeap": 402653174, + "frameworkOffHeap": 134217728, + "taskOffHeap": 0, + "networkMemory": 134217730, + "managedMemory": 536870920, + "jvmMetaspace": 268435456, + "jvmOverhead": 201326592, + "totalFlinkMemory": 1342177280, + "totalProcessMemory": 1811939328 + } + } + ] +} diff --git a/receiver/flinkmetricsreceiver/testdata/apiresponses/taskmanager_metric_names.json b/receiver/flinkmetricsreceiver/testdata/apiresponses/taskmanager_metric_names.json new file mode 100644 index 0000000000000..d9f74d3fc6bd6 --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/apiresponses/taskmanager_metric_names.json @@ -0,0 +1,104 @@ +[ + { + "id": "Status.JVM.Memory.Mapped.TotalCapacity" + }, + { + "id": "Status.Network.AvailableMemorySegments" + }, + { + "id": "Status.Network.TotalMemorySegments" + }, + { + "id": "Status.JVM.Memory.Mapped.MemoryUsed" + }, + { + "id": "Status.JVM.CPU.Time" + }, + { + "id": "Status.Flink.Memory.Managed.Total" + }, + { + "id": "Status.JVM.GarbageCollector.G1_Young_Generation.Count" + }, + { + "id": "Status.JVM.Threads.Count" + }, + { + "id": "Status.Shuffle.Netty.UsedMemory" + }, + { + "id": "Status.JVM.Memory.Heap.Committed" + }, + { + "id": "Status.Shuffle.Netty.TotalMemory" + }, + { + "id": "Status.JVM.Memory.Metaspace.Committed" + }, + { + "id": "Status.JVM.Memory.Direct.Count" + }, + { + "id": "Status.Shuffle.Netty.AvailableMemorySegments" + }, + { + "id": "Status.JVM.Memory.NonHeap.Max" + }, + { + "id": "Status.Shuffle.Netty.TotalMemorySegments" + }, + { + "id": "Status.JVM.Memory.NonHeap.Committed" + }, + { + "id": "Status.JVM.Memory.NonHeap.Used" + }, + { + "id": "Status.JVM.Memory.Metaspace.Max" + }, + { + "id": "Status.JVM.GarbageCollector.G1_Old_Generation.Count" + }, + { + "id": 
"Status.JVM.Memory.Direct.MemoryUsed" + }, + { + "id": "Status.JVM.GarbageCollector.G1_Old_Generation.Time" + }, + { + "id": "Status.JVM.Memory.Direct.TotalCapacity" + }, + { + "id": "Status.Shuffle.Netty.UsedMemorySegments" + }, + { + "id": "Status.JVM.ClassLoader.ClassesLoaded" + }, + { + "id": "Status.JVM.Memory.Mapped.Count" + }, + { + "id": "Status.Flink.Memory.Managed.Used" + }, + { + "id": "Status.JVM.Memory.Metaspace.Used" + }, + { + "id": "Status.JVM.CPU.Load" + }, + { + "id": "Status.JVM.Memory.Heap.Max" + }, + { + "id": "Status.JVM.Memory.Heap.Used" + }, + { + "id": "Status.JVM.ClassLoader.ClassesUnloaded" + }, + { + "id": "Status.JVM.GarbageCollector.G1_Young_Generation.Time" + }, + { + "id": "Status.Shuffle.Netty.AvailableMemory" + } +] diff --git a/receiver/flinkmetricsreceiver/testdata/apiresponses/taskmanager_metric_values.json b/receiver/flinkmetricsreceiver/testdata/apiresponses/taskmanager_metric_values.json new file mode 100644 index 0000000000000..1db7add4fb27f --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/apiresponses/taskmanager_metric_values.json @@ -0,0 +1,138 @@ +[ + { + "id": "Status.JVM.Memory.Mapped.TotalCapacity", + "value": "0" + }, + { + "id": "Status.Network.AvailableMemorySegments", + "value": "4092" + }, + { + "id": "Status.Network.TotalMemorySegments", + "value": "4096" + }, + { + "id": "Status.JVM.Memory.Mapped.MemoryUsed", + "value": "0" + }, + { + "id": "Status.JVM.CPU.Time", + "value": "1686900000000" + }, + { + "id": "Status.Flink.Memory.Managed.Total", + "value": "536870920" + }, + { + "id": "Status.JVM.GarbageCollector.G1_Young_Generation.Count", + "value": "26" + }, + { + "id": "Status.JVM.Threads.Count", + "value": "60" + }, + { + "id": "Status.Shuffle.Netty.UsedMemory", + "value": "131072" + }, + { + "id": "Status.JVM.Memory.Heap.Committed", + "value": "536870912" + }, + { + "id": "Status.Shuffle.Netty.TotalMemory", + "value": "134217728" + }, + { + "id": "Status.JVM.Memory.Metaspace.Committed", + "value": "60768256" + }, + { + "id": "Status.JVM.Memory.Direct.Count", + "value": "4124" + }, + { + "id": "Status.Shuffle.Netty.AvailableMemorySegments", + "value": "4092" + }, + { + "id": "Status.JVM.Memory.NonHeap.Max", + "value": "780140544" + }, + { + "id": "Status.Shuffle.Netty.TotalMemorySegments", + "value": "4096" + }, + { + "id": "Status.JVM.Memory.NonHeap.Committed", + "value": "90603520" + }, + { + "id": "Status.JVM.Memory.NonHeap.Used", + "value": "86144672" + }, + { + "id": "Status.JVM.Memory.Metaspace.Max", + "value": "268435456" + }, + { + "id": "Status.JVM.GarbageCollector.G1_Old_Generation.Count", + "value": "0" + }, + { + "id": "Status.JVM.Memory.Direct.MemoryUsed", + "value": "135537665" + }, + { + "id": "Status.JVM.GarbageCollector.G1_Old_Generation.Time", + "value": "0" + }, + { + "id": "Status.JVM.Memory.Direct.TotalCapacity", + "value": "135537664" + }, + { + "id": "Status.Shuffle.Netty.UsedMemorySegments", + "value": "4" + }, + { + "id": "Status.JVM.ClassLoader.ClassesLoaded", + "value": "9192" + }, + { + "id": "Status.JVM.Memory.Mapped.Count", + "value": "0" + }, + { + "id": "Status.Flink.Memory.Managed.Used", + "value": "0" + }, + { + "id": "Status.JVM.Memory.Metaspace.Used", + "value": "57100992" + }, + { + "id": "Status.JVM.CPU.Load", + "value": "0.019790316880669102" + }, + { + "id": "Status.JVM.Memory.Heap.Max", + "value": "536870912" + }, + { + "id": "Status.JVM.Memory.Heap.Used", + "value": "42858976" + }, + { + "id": "Status.JVM.ClassLoader.ClassesUnloaded", + "value": "0" + }, + { + "id": 
"Status.JVM.GarbageCollector.G1_Young_Generation.Time", + "value": "675" + }, + { + "id": "Status.Shuffle.Netty.AvailableMemory", + "value": "134086656" + } +] diff --git a/receiver/flinkmetricsreceiver/testdata/apiresponses/vertices.json b/receiver/flinkmetricsreceiver/testdata/apiresponses/vertices.json new file mode 100644 index 0000000000000..4a13c6454d96d --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/apiresponses/vertices.json @@ -0,0 +1,30 @@ +{ + "id": "bc764cd8ddf7a0cff126f51c16239658", + "name": "Source: Custom Source", + "parallelism": 1, + "maxParallelism": 128, + "now": 1651462696639, + "subtasks": [ + { + "subtask": 0, + "status": "RUNNING", + "attempt": 1, + "host": "flink-worker", + "start-time": 1651445646461, + "end-time": -1, + "duration": 17050178, + "metrics": { + "read-bytes": 0, + "read-bytes-complete": true, + "write-bytes": 120941353, + "write-bytes-complete": true, + "read-records": 0, + "read-records-complete": true, + "write-records": 8678685, + "write-records-complete": true + }, + "taskmanager-id": "172.26.0.3:34457-7b2520", + "start_time": 1651445646461 + } + ] +} diff --git a/receiver/flinkmetricsreceiver/testdata/config.yaml b/receiver/flinkmetricsreceiver/testdata/config.yaml new file mode 100644 index 0000000000000..4a448c1346cc6 --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/config.yaml @@ -0,0 +1,17 @@ +receivers: + flinkmetrics: + endpoint: http://localhost:8081 + collection_interval: 10s + +processors: + nop: + +exporters: + nop: + +service: + pipelines: + metrics: + receivers: [flinkmetrics] + processors: [nop] + exporters: [nop] diff --git a/receiver/flinkmetricsreceiver/testdata/expected_metrics/metrics_golden.json b/receiver/flinkmetricsreceiver/testdata/expected_metrics/metrics_golden.json new file mode 100644 index 0000000000000..db1ac3b639ca5 --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/expected_metrics/metrics_golden.json @@ -0,0 +1,1617 @@ +{ + "resourceMetrics": [ + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "mock-host" + } + }, + { + "key": "flink.resource.type", + "value": { + "stringValue": "jobmanager" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "description": "The total number of classes loaded since the start of the JVM.", + "name": "flink.jvm.class_loader.classes_loaded", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "21", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ], + "isMonotonic": true + }, + "unit": "{classes}" + }, + { + "description": "The CPU usage of the JVM for a jobmanager or taskmanager.", + "gauge": { + "dataPoints": [ + { + "asDouble": 0.24, + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "name": "flink.jvm.cpu.load", + "unit": "%" + }, + { + "description": "The CPU time used by the JVM for a jobmanager or taskmanager.", + "name": "flink.jvm.cpu.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "5", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ], + "isMonotonic": true + }, + "unit": "ns" + }, + { + "description": "The total number of collections that have occurred.", + "name": "flink.jvm.gc.collections.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "10", + "attributes": [ + 
{ + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_MarkSweep" + } + } + ], + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + }, + { + "asInt": "12", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_Scavenge" + } + } + ], + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ], + "isMonotonic": true + }, + "unit": "{collections}" + }, + { + "description": "The total time spent performing garbage collection.", + "name": "flink.jvm.gc.collections.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_MarkSweep" + } + } + ], + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + }, + { + "asInt": "9", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_Scavenge" + } + } + ], + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ], + "isMonotonic": true + }, + "unit": "ms" + }, + { + "description": "The total capacity of all buffers in the direct buffer pool.", + "name": "flink.jvm.memory.direct.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "19", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the direct buffer pool.", + "name": "flink.jvm.memory.direct.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "18", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.heap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "7", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of heap memory that can be used for memory management.", + "name": "flink.jvm.memory.heap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "25", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory currently used.", + "name": "flink.jvm.memory.heap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "26", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The number of buffers in the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "1", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.used", + "sum": { + "aggregationTemporality": 
"AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "4", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory guaranteed to be available to the JVM in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "8", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of memory that can be used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "17", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory currently used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "23", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.nonheap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "15", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of non-heap memory that can be used for memory management.", + "name": "flink.jvm.memory.nonheap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "13", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory currently used.", + "name": "flink.jvm.memory.nonheap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "16", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The total number of live threads.", + "name": "flink.jvm.threads.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "6", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "{threads}" + }, + { + "description": "The total amount of managed memory.", + "name": "flink.memory.managed.total", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "28", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of managed memory currently used.", + "name": "flink.memory.managed.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "29", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + } + ], + "scope": { + "name": "otelcol/flinkmetricsreceiver", + "version": "latest" + } + } + ] + 
}, + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "mock-host" + } + }, + { + "key": "flink.taskmanager.id", + "value": { + "stringValue": "mock-taskmanager-id" + } + }, + { + "key": "flink.resource.type", + "value": { + "stringValue": "taskmanager" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "description": "The total number of classes loaded since the start of the JVM.", + "name": "flink.jvm.class_loader.classes_loaded", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "24", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ], + "isMonotonic": true + }, + "unit": "{classes}" + }, + { + "description": "The CPU usage of the JVM for a jobmanager or taskmanager.", + "gauge": { + "dataPoints": [ + { + "asDouble": 0.28, + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "name": "flink.jvm.cpu.load", + "unit": "%" + }, + { + "description": "The CPU time used by the JVM for a jobmanager or taskmanager.", + "name": "flink.jvm.cpu.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "4", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ], + "isMonotonic": true + }, + "unit": "ns" + }, + { + "description": "The total number of collections that have occurred.", + "name": "flink.jvm.gc.collections.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "6", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Young_Generation" + } + } + ], + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + }, + { + "asInt": "19", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Old_Generation" + } + } + ], + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ], + "isMonotonic": true + }, + "unit": "{collections}" + }, + { + "description": "The total time spent performing garbage collection.", + "name": "flink.jvm.gc.collections.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "21", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Old_Generation" + } + } + ], + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + }, + { + "asInt": "32", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Young_Generation" + } + } + ], + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ], + "isMonotonic": true + }, + "unit": "ms" + }, + { + "description": "The total capacity of all buffers in the direct buffer pool.", + "name": "flink.jvm.memory.direct.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "22", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the direct buffer pool.", + "name": "flink.jvm.memory.direct.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "20", + "startTimeUnixNano": "1653681344833972000", 
+ "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.heap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "9", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of heap memory that can be used for memory management.", + "name": "flink.jvm.memory.heap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "29", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory currently used.", + "name": "flink.jvm.memory.heap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "30", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The number of buffers in the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "3", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory guaranteed to be available to the JVM in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "11", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of memory that can be used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "18", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory currently used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "27", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.nonheap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "16", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of non-heap memory that can be used for memory management.", + "name": "flink.jvm.memory.nonheap.max", + "sum": 
{ + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "14", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory currently used.", + "name": "flink.jvm.memory.nonheap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "17", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The total number of live threads.", + "name": "flink.jvm.threads.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "7", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "{threads}" + }, + { + "description": "The total amount of managed memory.", + "name": "flink.memory.managed.total", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "5", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of managed memory currently used.", + "name": "flink.memory.managed.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "26", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + } + ], + "scope": { + "name": "otelcol/flinkmetricsreceiver", + "version": "latest" + } + } + ] + }, + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "mock-host2" + } + }, + { + "key": "flink.taskmanager.id", + "value": { + "stringValue": "mock-taskmanager-id2" + } + }, + { + "key": "flink.resource.type", + "value": { + "stringValue": "taskmanager" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "description": "The total number of classes loaded since the start of the JVM.", + "name": "flink.jvm.class_loader.classes_loaded", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "24", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ], + "isMonotonic": true + }, + "unit": "{classes}" + }, + { + "description": "The CPU usage of the JVM for a jobmanager or taskmanager.", + "gauge": { + "dataPoints": [ + { + "asDouble": 0.28, + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "name": "flink.jvm.cpu.load", + "unit": "%" + }, + { + "description": "The CPU time used by the JVM for a jobmanager or taskmanager.", + "name": "flink.jvm.cpu.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "4", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ], + "isMonotonic": true + }, + "unit": "ns" + }, + { + "description": "The total number of collections that have occurred.", + "name": "flink.jvm.gc.collections.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "6", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Young_Generation" + } + } + ], + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + }, + { + 
"asInt": "19", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Old_Generation" + } + } + ], + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ], + "isMonotonic": true + }, + "unit": "{collections}" + }, + { + "description": "The total time spent performing garbage collection.", + "name": "flink.jvm.gc.collections.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "21", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Old_Generation" + } + } + ], + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + }, + { + "asInt": "32", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Young_Generation" + } + } + ], + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ], + "isMonotonic": true + }, + "unit": "ms" + }, + { + "description": "The total capacity of all buffers in the direct buffer pool.", + "name": "flink.jvm.memory.direct.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "22", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the direct buffer pool.", + "name": "flink.jvm.memory.direct.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "20", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.heap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "9", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of heap memory that can be used for memory management.", + "name": "flink.jvm.memory.heap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "29", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory currently used.", + "name": "flink.jvm.memory.heap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "30", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The number of buffers in the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "3", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": 
"By" + }, + { + "description": "The amount of memory guaranteed to be available to the JVM in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "11", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of memory that can be used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "18", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory currently used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "27", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.nonheap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "16", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of non-heap memory that can be used for memory management.", + "name": "flink.jvm.memory.nonheap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "14", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory currently used.", + "name": "flink.jvm.memory.nonheap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "17", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The total number of live threads.", + "name": "flink.jvm.threads.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "7", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "{threads}" + }, + { + "description": "The total amount of managed memory.", + "name": "flink.memory.managed.total", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "5", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of managed memory currently used.", + "name": "flink.memory.managed.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "26", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + } + ], + "scope": { + "name": "otelcol/flinkmetricsreceiver", + "version": "latest" + } + } + ] + }, + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "mock-host" + } + }, + { + "key": "flink.job.name", + "value": { + "stringValue": 
"mock-job-name" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "description": "The number of checkpoints completed or failed.", + "name": "flink.job.checkpoint.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "attributes": [ + { + "key": "checkpoint", + "value": { + "stringValue": "failed" + } + } + ], + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + }, + { + "asInt": "3", + "attributes": [ + { + "key": "checkpoint", + "value": { + "stringValue": "completed" + } + } + ], + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ], + "isMonotonic": true + }, + "unit": "{checkpoints}" + }, + { + "description": "The number of checkpoints in progress.", + "name": "flink.job.checkpoint.in_progress", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "2", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "{checkpoints}" + }, + { + "description": "The total size of the last checkpoint.", + "name": "flink.job.last_checkpoint.size", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "1", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The end to end duration of the last checkpoint.", + "gauge": { + "dataPoints": [ + { + "asInt": "4", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "name": "flink.job.last_checkpoint.time", + "unit": "ms" + }, + { + "description": "The total number of restarts since this job was submitted, including full restarts and fine-grained restarts.", + "name": "flink.job.restart.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "5", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ], + "isMonotonic": true + }, + "unit": "{restarts}" + } + ], + "scope": { + "name": "otelcol/flinkmetricsreceiver", + "version": "latest" + } + } + ] + }, + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "mock-host2" + } + }, + { + "key": "flink.job.name", + "value": { + "stringValue": "mock-job-name2" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "description": "The number of checkpoints completed or failed.", + "name": "flink.job.checkpoint.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "attributes": [ + { + "key": "checkpoint", + "value": { + "stringValue": "failed" + } + } + ], + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + }, + { + "asInt": "3", + "attributes": [ + { + "key": "checkpoint", + "value": { + "stringValue": "completed" + } + } + ], + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ], + "isMonotonic": true + }, + "unit": "{checkpoints}" + }, + { + "description": "The number of checkpoints in progress.", + "name": "flink.job.checkpoint.in_progress", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "2", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": 
"{checkpoints}" + }, + { + "description": "The total size of the last checkpoint.", + "name": "flink.job.last_checkpoint.size", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "1", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "By" + }, + { + "description": "The end to end duration of the last checkpoint.", + "gauge": { + "dataPoints": [ + { + "asInt": "4", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "name": "flink.job.last_checkpoint.time", + "unit": "ms" + }, + { + "description": "The total number of restarts since this job was submitted, including full restarts and fine-grained restarts.", + "name": "flink.job.restart.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "5", + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ], + "isMonotonic": true + }, + "unit": "{restarts}" + } + ], + "scope": { + "name": "otelcol/flinkmetricsreceiver", + "version": "latest" + } + } + ] + }, + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "mock-host" + } + }, + { + "key": "flink.taskmanager.id", + "value": { + "stringValue": "mock-taskmanager-id" + } + }, + { + "key": "flink.job.name", + "value": { + "stringValue": "mock-job-name" + } + }, + { + "key": "flink.task.name", + "value": { + "stringValue": "mock-task-name" + } + }, + { + "key": "flink.subtask.index", + "value": { + "stringValue": "mock-subtask-index" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "description": "The number of records an operator has.", + "name": "flink.operator.record.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "attributes": [ + { + "key": "operator_name", + "value": { + "stringValue": "Source__Custom_Source" + } + }, + { + "key": "record", + "value": { + "stringValue": "out" + } + } + ], + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + }, + { + "asInt": "5", + "attributes": [ + { + "key": "operator_name", + "value": { + "stringValue": "Source__Custom_Source" + } + }, + { + "key": "record", + "value": { + "stringValue": "in" + } + } + ], + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + }, + { + "asInt": "6", + "attributes": [ + { + "key": "operator_name", + "value": { + "stringValue": "Source__Custom_Source" + } + }, + { + "key": "record", + "value": { + "stringValue": "dropped" + } + } + ], + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ], + "isMonotonic": true + }, + "unit": "{records}" + }, + { + "description": "The last watermark this operator has emitted.", + "name": "flink.operator.watermark.output", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "3", + "attributes": [ + { + "key": "operator_name", + "value": { + "stringValue": "Source__Custom_Source" + } + } + ], + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ] + }, + "unit": "ms" + }, + { + "description": "The number of records a task has.", + "name": "flink.task.record.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "1", + "attributes": [ + { + 
"key": "record", + "value": { + "stringValue": "out" + } + } + ], + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + }, + { + "asInt": "2", + "attributes": [ + { + "key": "record", + "value": { + "stringValue": "dropped" + } + } + ], + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + }, + { + "asInt": "4", + "attributes": [ + { + "key": "record", + "value": { + "stringValue": "in" + } + } + ], + "startTimeUnixNano": "1653681344833972000", + "timeUnixNano": "1653681344834031000" + } + ], + "isMonotonic": true + }, + "unit": "{records}" + } + ], + "scope": { + "name": "otelcol/flinkmetricsreceiver", + "version": "latest" + } + } + ] + } + ] +} diff --git a/receiver/flinkmetricsreceiver/testdata/expected_metrics/metrics_no_jobs_golden.json b/receiver/flinkmetricsreceiver/testdata/expected_metrics/metrics_no_jobs_golden.json new file mode 100644 index 0000000000000..8883e8048238c --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/expected_metrics/metrics_no_jobs_golden.json @@ -0,0 +1,1180 @@ +{ + "resourceMetrics": [ + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "mock-host" + } + }, + { + "key": "flink.resource.type", + "value": { + "stringValue": "jobmanager" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "description": "The total number of classes loaded since the start of the JVM.", + "name": "flink.jvm.class_loader.classes_loaded", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "21", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ], + "isMonotonic": true + }, + "unit": "{classes}" + }, + { + "description": "The CPU usage of the JVM for a jobmanager or taskmanager.", + "gauge": { + "dataPoints": [ + { + "asDouble": 0.24, + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "name": "flink.jvm.cpu.load", + "unit": "%" + }, + { + "description": "The CPU time used by the JVM for a jobmanager or taskmanager.", + "name": "flink.jvm.cpu.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "5", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ], + "isMonotonic": true + }, + "unit": "ns" + }, + { + "description": "The total number of collections that have occurred.", + "name": "flink.jvm.gc.collections.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "10", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_MarkSweep" + } + } + ], + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + }, + { + "asInt": "12", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_Scavenge" + } + } + ], + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ], + "isMonotonic": true + }, + "unit": "{collections}" + }, + { + "description": "The total time spent performing garbage collection.", + "name": "flink.jvm.gc.collections.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_MarkSweep" + } + } + ], + "startTimeUnixNano": "1653681344815198000", + 
"timeUnixNano": "1653681344815257000" + }, + { + "asInt": "9", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_Scavenge" + } + } + ], + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ], + "isMonotonic": true + }, + "unit": "ms" + }, + { + "description": "The total capacity of all buffers in the direct buffer pool.", + "name": "flink.jvm.memory.direct.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "19", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the direct buffer pool.", + "name": "flink.jvm.memory.direct.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "18", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.heap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "7", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of heap memory that can be used for memory management.", + "name": "flink.jvm.memory.heap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "25", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory currently used.", + "name": "flink.jvm.memory.heap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "26", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The number of buffers in the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "1", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "4", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory guaranteed to be available to the JVM in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "8", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of memory that can be used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "17", + "startTimeUnixNano": 
"1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory currently used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "23", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.nonheap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "15", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of non-heap memory that can be used for memory management.", + "name": "flink.jvm.memory.nonheap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "13", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory currently used.", + "name": "flink.jvm.memory.nonheap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "16", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The total number of live threads.", + "name": "flink.jvm.threads.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "6", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "{threads}" + }, + { + "description": "The total amount of managed memory.", + "name": "flink.memory.managed.total", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "28", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of managed memory currently used.", + "name": "flink.memory.managed.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "29", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + } + ], + "scope": { + "name": "otelcol/flinkmetricsreceiver", + "version": "latest" + } + } + ] + }, + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "mock-host" + } + }, + { + "key": "flink.taskmanager.id", + "value": { + "stringValue": "mock-taskmanager-id" + } + }, + { + "key": "flink.resource.type", + "value": { + "stringValue": "taskmanager" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "description": "The total number of classes loaded since the start of the JVM.", + "name": "flink.jvm.class_loader.classes_loaded", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "24", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ], + "isMonotonic": true + }, + "unit": "{classes}" + }, + { + "description": "The CPU usage of the JVM for a jobmanager or taskmanager.", + "gauge": { + "dataPoints": [ + { + 
"asDouble": 0.28, + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "name": "flink.jvm.cpu.load", + "unit": "%" + }, + { + "description": "The CPU time used by the JVM for a jobmanager or taskmanager.", + "name": "flink.jvm.cpu.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "4", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ], + "isMonotonic": true + }, + "unit": "ns" + }, + { + "description": "The total number of collections that have occurred.", + "name": "flink.jvm.gc.collections.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "6", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Young_Generation" + } + } + ], + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + }, + { + "asInt": "19", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Old_Generation" + } + } + ], + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ], + "isMonotonic": true + }, + "unit": "{collections}" + }, + { + "description": "The total time spent performing garbage collection.", + "name": "flink.jvm.gc.collections.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "21", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Old_Generation" + } + } + ], + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + }, + { + "asInt": "32", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Young_Generation" + } + } + ], + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ], + "isMonotonic": true + }, + "unit": "ms" + }, + { + "description": "The total capacity of all buffers in the direct buffer pool.", + "name": "flink.jvm.memory.direct.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "22", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the direct buffer pool.", + "name": "flink.jvm.memory.direct.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "20", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.heap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "9", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of heap memory that can be used for memory management.", + "name": "flink.jvm.memory.heap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "29", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory currently 
used.", + "name": "flink.jvm.memory.heap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "30", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The number of buffers in the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "3", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory guaranteed to be available to the JVM in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "11", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of memory that can be used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "18", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory currently used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "27", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.nonheap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "16", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of non-heap memory that can be used for memory management.", + "name": "flink.jvm.memory.nonheap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "14", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory currently used.", + "name": "flink.jvm.memory.nonheap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "17", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The total number of live threads.", + "name": "flink.jvm.threads.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "7", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + 
} + ] + }, + "unit": "{threads}" + }, + { + "description": "The total amount of managed memory.", + "name": "flink.memory.managed.total", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "5", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of managed memory currently used.", + "name": "flink.memory.managed.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "26", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + } + ], + "scope": { + "name": "otelcol/flinkmetricsreceiver", + "version": "latest" + } + } + ] + }, + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "mock-host2" + } + }, + { + "key": "flink.taskmanager.id", + "value": { + "stringValue": "mock-taskmanager-id2" + } + }, + { + "key": "flink.resource.type", + "value": { + "stringValue": "taskmanager" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "description": "The total number of classes loaded since the start of the JVM.", + "name": "flink.jvm.class_loader.classes_loaded", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "24", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ], + "isMonotonic": true + }, + "unit": "{classes}" + }, + { + "description": "The CPU usage of the JVM for a jobmanager or taskmanager.", + "gauge": { + "dataPoints": [ + { + "asDouble": 0.28, + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "name": "flink.jvm.cpu.load", + "unit": "%" + }, + { + "description": "The CPU time used by the JVM for a jobmanager or taskmanager.", + "name": "flink.jvm.cpu.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "4", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ], + "isMonotonic": true + }, + "unit": "ns" + }, + { + "description": "The total number of collections that have occurred.", + "name": "flink.jvm.gc.collections.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "6", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Young_Generation" + } + } + ], + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + }, + { + "asInt": "19", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Old_Generation" + } + } + ], + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ], + "isMonotonic": true + }, + "unit": "{collections}" + }, + { + "description": "The total time spent performing garbage collection.", + "name": "flink.jvm.gc.collections.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "21", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Old_Generation" + } + } + ], + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + }, + { + "asInt": "32", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Young_Generation" + } + } 
+ ], + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ], + "isMonotonic": true + }, + "unit": "ms" + }, + { + "description": "The total capacity of all buffers in the direct buffer pool.", + "name": "flink.jvm.memory.direct.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "22", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the direct buffer pool.", + "name": "flink.jvm.memory.direct.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "20", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.heap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "9", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of heap memory that can be used for memory management.", + "name": "flink.jvm.memory.heap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "29", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory currently used.", + "name": "flink.jvm.memory.heap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "30", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The number of buffers in the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "3", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory guaranteed to be available to the JVM in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "11", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of memory that can be used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "18", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory currently used in the Metaspace memory 
pool.", + "name": "flink.jvm.memory.metaspace.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "27", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.nonheap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "16", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of non-heap memory that can be used for memory management.", + "name": "flink.jvm.memory.nonheap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "14", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory currently used.", + "name": "flink.jvm.memory.nonheap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "17", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The total number of live threads.", + "name": "flink.jvm.threads.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "7", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "{threads}" + }, + { + "description": "The total amount of managed memory.", + "name": "flink.memory.managed.total", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "5", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of managed memory currently used.", + "name": "flink.memory.managed.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "26", + "startTimeUnixNano": "1653681344815198000", + "timeUnixNano": "1653681344815257000" + } + ] + }, + "unit": "By" + } + ], + "scope": { + "name": "otelcol/flinkmetricsreceiver", + "version": "latest" + } + } + ] + } + ] +} diff --git a/receiver/flinkmetricsreceiver/testdata/expected_metrics/no_metrics.json b/receiver/flinkmetricsreceiver/testdata/expected_metrics/no_metrics.json new file mode 100644 index 0000000000000..0967ef424bce6 --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/expected_metrics/no_metrics.json @@ -0,0 +1 @@ +{} diff --git a/receiver/flinkmetricsreceiver/testdata/expected_metrics/partial_metrics_no_jobs.json b/receiver/flinkmetricsreceiver/testdata/expected_metrics/partial_metrics_no_jobs.json new file mode 100644 index 0000000000000..02a7baa0d1c51 --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/expected_metrics/partial_metrics_no_jobs.json @@ -0,0 +1,1180 @@ +{ + "resourceMetrics": [ + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "mock-host" + } + }, + { + "key": "flink.resource.type", + "value": { + "stringValue": "jobmanager" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "description": "The total number of classes loaded since the start of the 
JVM.", + "name": "flink.jvm.class_loader.classes_loaded", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "21", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ], + "isMonotonic": true + }, + "unit": "{classes}" + }, + { + "description": "The CPU usage of the JVM for a jobmanager or taskmanager.", + "gauge": { + "dataPoints": [ + { + "asDouble": 0.24, + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "name": "flink.jvm.cpu.load", + "unit": "%" + }, + { + "description": "The CPU time used by the JVM for a jobmanager or taskmanager.", + "name": "flink.jvm.cpu.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "5", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ], + "isMonotonic": true + }, + "unit": "ns" + }, + { + "description": "The total number of collections that have occurred.", + "name": "flink.jvm.gc.collections.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "10", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_MarkSweep" + } + } + ], + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + }, + { + "asInt": "12", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_Scavenge" + } + } + ], + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ], + "isMonotonic": true + }, + "unit": "{collections}" + }, + { + "description": "The total time spent performing garbage collection.", + "name": "flink.jvm.gc.collections.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_MarkSweep" + } + } + ], + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + }, + { + "asInt": "9", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_Scavenge" + } + } + ], + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ], + "isMonotonic": true + }, + "unit": "ms" + }, + { + "description": "The total capacity of all buffers in the direct buffer pool.", + "name": "flink.jvm.memory.direct.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "19", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the direct buffer pool.", + "name": "flink.jvm.memory.direct.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "18", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.heap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "7", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + 
"description": "The maximum amount of heap memory that can be used for memory management.", + "name": "flink.jvm.memory.heap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "25", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory currently used.", + "name": "flink.jvm.memory.heap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "26", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The number of buffers in the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "1", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "4", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory guaranteed to be available to the JVM in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "8", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of memory that can be used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "17", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory currently used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "23", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.nonheap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "15", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of non-heap memory that can be used for memory management.", + "name": "flink.jvm.memory.nonheap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "13", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory currently used.", + "name": "flink.jvm.memory.nonheap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": 
"16", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The total number of live threads.", + "name": "flink.jvm.threads.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "6", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "{threads}" + }, + { + "description": "The total amount of managed memory.", + "name": "flink.memory.managed.total", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "28", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of managed memory currently used.", + "name": "flink.memory.managed.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "29", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + } + ], + "scope": { + "name": "otelcol/flinkmetricsreceiver", + "version": "latest" + } + } + ] + }, + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "mock-host" + } + }, + { + "key": "flink.taskmanager.id", + "value": { + "stringValue": "mock-taskmanager-id" + } + }, + { + "key": "flink.resource.type", + "value": { + "stringValue": "taskmanager" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "description": "The total number of classes loaded since the start of the JVM.", + "name": "flink.jvm.class_loader.classes_loaded", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "24", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ], + "isMonotonic": true + }, + "unit": "{classes}" + }, + { + "description": "The CPU usage of the JVM for a jobmanager or taskmanager.", + "gauge": { + "dataPoints": [ + { + "asDouble": 0.28, + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "name": "flink.jvm.cpu.load", + "unit": "%" + }, + { + "description": "The CPU time used by the JVM for a jobmanager or taskmanager.", + "name": "flink.jvm.cpu.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "4", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ], + "isMonotonic": true + }, + "unit": "ns" + }, + { + "description": "The total number of collections that have occurred.", + "name": "flink.jvm.gc.collections.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "6", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Young_Generation" + } + } + ], + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + }, + { + "asInt": "19", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Old_Generation" + } + } + ], + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ], + "isMonotonic": true + }, + "unit": "{collections}" + }, + { + "description": "The total time spent performing garbage collection.", + "name": "flink.jvm.gc.collections.time", + "sum": { + "aggregationTemporality": 
"AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "21", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Old_Generation" + } + } + ], + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + }, + { + "asInt": "32", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Young_Generation" + } + } + ], + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ], + "isMonotonic": true + }, + "unit": "ms" + }, + { + "description": "The total capacity of all buffers in the direct buffer pool.", + "name": "flink.jvm.memory.direct.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "22", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the direct buffer pool.", + "name": "flink.jvm.memory.direct.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "20", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.heap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "9", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of heap memory that can be used for memory management.", + "name": "flink.jvm.memory.heap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "29", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory currently used.", + "name": "flink.jvm.memory.heap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "30", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The number of buffers in the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "3", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory guaranteed to be available to the JVM in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "11", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount 
of memory that can be used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "18", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory currently used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "27", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.nonheap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "16", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of non-heap memory that can be used for memory management.", + "name": "flink.jvm.memory.nonheap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "14", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory currently used.", + "name": "flink.jvm.memory.nonheap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "17", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The total number of live threads.", + "name": "flink.jvm.threads.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "7", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "{threads}" + }, + { + "description": "The total amount of managed memory.", + "name": "flink.memory.managed.total", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "5", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of managed memory currently used.", + "name": "flink.memory.managed.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "26", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + } + ], + "scope": { + "name": "otelcol/flinkmetricsreceiver", + "version": "latest" + } + } + ] + }, + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "mock-host2" + } + }, + { + "key": "flink.taskmanager.id", + "value": { + "stringValue": "mock-taskmanager-id2" + } + }, + { + "key": "flink.resource.type", + "value": { + "stringValue": "taskmanager" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "description": "The total number of classes loaded since the start of the JVM.", + "name": "flink.jvm.class_loader.classes_loaded", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "24", + "startTimeUnixNano": 
"1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ], + "isMonotonic": true + }, + "unit": "{classes}" + }, + { + "description": "The CPU usage of the JVM for a jobmanager or taskmanager.", + "gauge": { + "dataPoints": [ + { + "asDouble": 0.28, + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "name": "flink.jvm.cpu.load", + "unit": "%" + }, + { + "description": "The CPU time used by the JVM for a jobmanager or taskmanager.", + "name": "flink.jvm.cpu.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "4", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ], + "isMonotonic": true + }, + "unit": "ns" + }, + { + "description": "The total number of collections that have occurred.", + "name": "flink.jvm.gc.collections.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "6", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Young_Generation" + } + } + ], + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + }, + { + "asInt": "19", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Old_Generation" + } + } + ], + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ], + "isMonotonic": true + }, + "unit": "{collections}" + }, + { + "description": "The total time spent performing garbage collection.", + "name": "flink.jvm.gc.collections.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "21", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Old_Generation" + } + } + ], + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + }, + { + "asInt": "32", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Young_Generation" + } + } + ], + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ], + "isMonotonic": true + }, + "unit": "ms" + }, + { + "description": "The total capacity of all buffers in the direct buffer pool.", + "name": "flink.jvm.memory.direct.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "22", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the direct buffer pool.", + "name": "flink.jvm.memory.direct.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "20", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.heap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "9", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of heap memory that can be used for memory management.", + "name": "flink.jvm.memory.heap.max", + "sum": { + "aggregationTemporality": 
"AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "29", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory currently used.", + "name": "flink.jvm.memory.heap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "30", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The number of buffers in the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "3", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory guaranteed to be available to the JVM in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "11", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of memory that can be used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "18", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory currently used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "27", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.nonheap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "16", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of non-heap memory that can be used for memory management.", + "name": "flink.jvm.memory.nonheap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "14", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory currently used.", + "name": "flink.jvm.memory.nonheap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "17", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The total number of 
live threads.", + "name": "flink.jvm.threads.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "7", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "{threads}" + }, + { + "description": "The total amount of managed memory.", + "name": "flink.memory.managed.total", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "5", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of managed memory currently used.", + "name": "flink.memory.managed.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "26", + "startTimeUnixNano": "1653681344771819000", + "timeUnixNano": "1653681344771868000" + } + ] + }, + "unit": "By" + } + ], + "scope": { + "name": "otelcol/flinkmetricsreceiver", + "version": "latest" + } + } + ] + } + ] +} diff --git a/receiver/flinkmetricsreceiver/testdata/expected_metrics/partial_metrics_no_subtasks.json b/receiver/flinkmetricsreceiver/testdata/expected_metrics/partial_metrics_no_subtasks.json new file mode 100644 index 0000000000000..6091cdb12b70f --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/expected_metrics/partial_metrics_no_subtasks.json @@ -0,0 +1,1430 @@ +{ + "resourceMetrics": [ + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "mock-host" + } + }, + { + "key": "flink.resource.type", + "value": { + "stringValue": "jobmanager" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "description": "The total number of classes loaded since the start of the JVM.", + "name": "flink.jvm.class_loader.classes_loaded", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "21", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ], + "isMonotonic": true + }, + "unit": "{classes}" + }, + { + "description": "The CPU usage of the JVM for a jobmanager or taskmanager.", + "gauge": { + "dataPoints": [ + { + "asDouble": 0.24, + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "name": "flink.jvm.cpu.load", + "unit": "%" + }, + { + "description": "The CPU time used by the JVM for a jobmanager or taskmanager.", + "name": "flink.jvm.cpu.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "5", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ], + "isMonotonic": true + }, + "unit": "ns" + }, + { + "description": "The total number of collections that have occurred.", + "name": "flink.jvm.gc.collections.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "10", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_MarkSweep" + } + } + ], + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + }, + { + "asInt": "12", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_Scavenge" + } + } + ], + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ], + "isMonotonic": true + }, + "unit": "{collections}" + }, + { + "description": "The total 
time spent performing garbage collection.", + "name": "flink.jvm.gc.collections.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_MarkSweep" + } + } + ], + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + }, + { + "asInt": "9", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_Scavenge" + } + } + ], + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ], + "isMonotonic": true + }, + "unit": "ms" + }, + { + "description": "The total capacity of all buffers in the direct buffer pool.", + "name": "flink.jvm.memory.direct.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "19", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the direct buffer pool.", + "name": "flink.jvm.memory.direct.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "18", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.heap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "7", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of heap memory that can be used for memory management.", + "name": "flink.jvm.memory.heap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "25", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory currently used.", + "name": "flink.jvm.memory.heap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "26", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The number of buffers in the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "1", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "4", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory guaranteed to be available to the JVM in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "8", + "startTimeUnixNano": "1653681344794462000", + 
"timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of memory that can be used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "17", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory currently used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "23", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.nonheap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "15", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of non-heap memory that can be used for memory management.", + "name": "flink.jvm.memory.nonheap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "13", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory currently used.", + "name": "flink.jvm.memory.nonheap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "16", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The total number of live threads.", + "name": "flink.jvm.threads.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "6", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "{threads}" + }, + { + "description": "The total amount of managed memory.", + "name": "flink.memory.managed.total", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "28", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of managed memory currently used.", + "name": "flink.memory.managed.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "29", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + } + ], + "scope": { + "name": "otelcol/flinkmetricsreceiver", + "version": "latest" + } + } + ] + }, + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "mock-host" + } + }, + { + "key": "flink.taskmanager.id", + "value": { + "stringValue": "mock-taskmanager-id" + } + }, + { + "key": "flink.resource.type", + "value": { + "stringValue": "taskmanager" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "description": "The total number of classes loaded since the start of the JVM.", + "name": "flink.jvm.class_loader.classes_loaded", + "sum": { + 
"aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "24", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ], + "isMonotonic": true + }, + "unit": "{classes}" + }, + { + "description": "The CPU usage of the JVM for a jobmanager or taskmanager.", + "gauge": { + "dataPoints": [ + { + "asDouble": 0.28, + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "name": "flink.jvm.cpu.load", + "unit": "%" + }, + { + "description": "The CPU time used by the JVM for a jobmanager or taskmanager.", + "name": "flink.jvm.cpu.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "4", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ], + "isMonotonic": true + }, + "unit": "ns" + }, + { + "description": "The total number of collections that have occurred.", + "name": "flink.jvm.gc.collections.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "6", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Young_Generation" + } + } + ], + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + }, + { + "asInt": "19", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Old_Generation" + } + } + ], + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ], + "isMonotonic": true + }, + "unit": "{collections}" + }, + { + "description": "The total time spent performing garbage collection.", + "name": "flink.jvm.gc.collections.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "21", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Old_Generation" + } + } + ], + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + }, + { + "asInt": "32", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Young_Generation" + } + } + ], + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ], + "isMonotonic": true + }, + "unit": "ms" + }, + { + "description": "The total capacity of all buffers in the direct buffer pool.", + "name": "flink.jvm.memory.direct.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "22", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the direct buffer pool.", + "name": "flink.jvm.memory.direct.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "20", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.heap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "9", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of heap memory that 
can be used for memory management.", + "name": "flink.jvm.memory.heap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "29", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory currently used.", + "name": "flink.jvm.memory.heap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "30", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The number of buffers in the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "3", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory guaranteed to be available to the JVM in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "11", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of memory that can be used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "18", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory currently used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "27", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.nonheap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "16", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of non-heap memory that can be used for memory management.", + "name": "flink.jvm.memory.nonheap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "14", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory currently used.", + "name": "flink.jvm.memory.nonheap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "17", + "startTimeUnixNano": "1653681344794462000", + 
"timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The total number of live threads.", + "name": "flink.jvm.threads.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "7", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "{threads}" + }, + { + "description": "The total amount of managed memory.", + "name": "flink.memory.managed.total", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "5", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of managed memory currently used.", + "name": "flink.memory.managed.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "26", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + } + ], + "scope": { + "name": "otelcol/flinkmetricsreceiver", + "version": "latest" + } + } + ] + }, + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "mock-host2" + } + }, + { + "key": "flink.taskmanager.id", + "value": { + "stringValue": "mock-taskmanager-id2" + } + }, + { + "key": "flink.resource.type", + "value": { + "stringValue": "taskmanager" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "description": "The total number of classes loaded since the start of the JVM.", + "name": "flink.jvm.class_loader.classes_loaded", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "24", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ], + "isMonotonic": true + }, + "unit": "{classes}" + }, + { + "description": "The CPU usage of the JVM for a jobmanager or taskmanager.", + "gauge": { + "dataPoints": [ + { + "asDouble": 0.28, + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "name": "flink.jvm.cpu.load", + "unit": "%" + }, + { + "description": "The CPU time used by the JVM for a jobmanager or taskmanager.", + "name": "flink.jvm.cpu.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "4", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ], + "isMonotonic": true + }, + "unit": "ns" + }, + { + "description": "The total number of collections that have occurred.", + "name": "flink.jvm.gc.collections.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "6", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Young_Generation" + } + } + ], + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + }, + { + "asInt": "19", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Old_Generation" + } + } + ], + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ], + "isMonotonic": true + }, + "unit": "{collections}" + }, + { + "description": "The total time spent performing garbage collection.", + "name": "flink.jvm.gc.collections.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ 
+ { + "asInt": "21", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Old_Generation" + } + } + ], + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + }, + { + "asInt": "32", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Young_Generation" + } + } + ], + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ], + "isMonotonic": true + }, + "unit": "ms" + }, + { + "description": "The total capacity of all buffers in the direct buffer pool.", + "name": "flink.jvm.memory.direct.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "22", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the direct buffer pool.", + "name": "flink.jvm.memory.direct.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "20", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.heap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "9", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of heap memory that can be used for memory management.", + "name": "flink.jvm.memory.heap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "29", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory currently used.", + "name": "flink.jvm.memory.heap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "30", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The number of buffers in the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "3", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory guaranteed to be available to the JVM in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "11", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of memory that can be used in the Metaspace memory 
pool.", + "name": "flink.jvm.memory.metaspace.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "18", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory currently used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "27", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.nonheap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "16", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of non-heap memory that can be used for memory management.", + "name": "flink.jvm.memory.nonheap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "14", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory currently used.", + "name": "flink.jvm.memory.nonheap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "17", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The total number of live threads.", + "name": "flink.jvm.threads.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "7", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "{threads}" + }, + { + "description": "The total amount of managed memory.", + "name": "flink.memory.managed.total", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "5", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of managed memory currently used.", + "name": "flink.memory.managed.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "26", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + } + ], + "scope": { + "name": "otelcol/flinkmetricsreceiver", + "version": "latest" + } + } + ] + }, + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "mock-host" + } + }, + { + "key": "flink.job.name", + "value": { + "stringValue": "mock-job-name" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "description": "The number of checkpoints completed or failed.", + "name": "flink.job.checkpoint.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "attributes": [ + { + "key": "checkpoint", + "value": { + "stringValue": "failed" + } + } + ], + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + }, + { + "asInt": "3", + 
"attributes": [ + { + "key": "checkpoint", + "value": { + "stringValue": "completed" + } + } + ], + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ], + "isMonotonic": true + }, + "unit": "{checkpoints}" + }, + { + "description": "The number of checkpoints in progress.", + "name": "flink.job.checkpoint.in_progress", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "2", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "{checkpoints}" + }, + { + "description": "The total size of the last checkpoint.", + "name": "flink.job.last_checkpoint.size", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "1", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The end to end duration of the last checkpoint.", + "gauge": { + "dataPoints": [ + { + "asInt": "4", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "name": "flink.job.last_checkpoint.time", + "unit": "ms" + }, + { + "description": "The total number of restarts since this job was submitted, including full restarts and fine-grained restarts.", + "name": "flink.job.restart.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "5", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ], + "isMonotonic": true + }, + "unit": "{restarts}" + } + ], + "scope": { + "name": "otelcol/flinkmetricsreceiver", + "version": "latest" + } + } + ] + }, + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "mock-host2" + } + }, + { + "key": "flink.job.name", + "value": { + "stringValue": "mock-job-name2" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "description": "The number of checkpoints completed or failed.", + "name": "flink.job.checkpoint.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "attributes": [ + { + "key": "checkpoint", + "value": { + "stringValue": "failed" + } + } + ], + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + }, + { + "asInt": "3", + "attributes": [ + { + "key": "checkpoint", + "value": { + "stringValue": "completed" + } + } + ], + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ], + "isMonotonic": true + }, + "unit": "{checkpoints}" + }, + { + "description": "The number of checkpoints in progress.", + "name": "flink.job.checkpoint.in_progress", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "2", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "{checkpoints}" + }, + { + "description": "The total size of the last checkpoint.", + "name": "flink.job.last_checkpoint.size", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "1", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "unit": "By" + }, + { + "description": "The end to end duration of the last checkpoint.", + "gauge": { + "dataPoints": [ + { + "asInt": "4", + "startTimeUnixNano": 
"1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ] + }, + "name": "flink.job.last_checkpoint.time", + "unit": "ms" + }, + { + "description": "The total number of restarts since this job was submitted, including full restarts and fine-grained restarts.", + "name": "flink.job.restart.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "5", + "startTimeUnixNano": "1653681344794462000", + "timeUnixNano": "1653681344794519000" + } + ], + "isMonotonic": true + }, + "unit": "{restarts}" + } + ], + "scope": { + "name": "otelcol/flinkmetricsreceiver", + "version": "latest" + } + } + ] + } + ] +} diff --git a/receiver/flinkmetricsreceiver/testdata/expected_metrics/partial_metrics_no_taskmanagers.json b/receiver/flinkmetricsreceiver/testdata/expected_metrics/partial_metrics_no_taskmanagers.json new file mode 100644 index 0000000000000..fc2c5da205cc4 --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/expected_metrics/partial_metrics_no_taskmanagers.json @@ -0,0 +1,392 @@ +{ + "resourceMetrics": [ + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "mock-host" + } + }, + { + "key": "flink.resource.type", + "value": { + "stringValue": "jobmanager" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "description": "The total number of classes loaded since the start of the JVM.", + "name": "flink.jvm.class_loader.classes_loaded", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "21", + "startTimeUnixNano": "1653681344750260000", + "timeUnixNano": "1653681344750308000" + } + ], + "isMonotonic": true + }, + "unit": "{classes}" + }, + { + "description": "The CPU usage of the JVM for a jobmanager or taskmanager.", + "gauge": { + "dataPoints": [ + { + "asDouble": 0.24, + "startTimeUnixNano": "1653681344750260000", + "timeUnixNano": "1653681344750308000" + } + ] + }, + "name": "flink.jvm.cpu.load", + "unit": "%" + }, + { + "description": "The CPU time used by the JVM for a jobmanager or taskmanager.", + "name": "flink.jvm.cpu.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "5", + "startTimeUnixNano": "1653681344750260000", + "timeUnixNano": "1653681344750308000" + } + ], + "isMonotonic": true + }, + "unit": "ns" + }, + { + "description": "The total number of collections that have occurred.", + "name": "flink.jvm.gc.collections.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "10", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_MarkSweep" + } + } + ], + "startTimeUnixNano": "1653681344750260000", + "timeUnixNano": "1653681344750308000" + }, + { + "asInt": "12", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_Scavenge" + } + } + ], + "startTimeUnixNano": "1653681344750260000", + "timeUnixNano": "1653681344750308000" + } + ], + "isMonotonic": true + }, + "unit": "{collections}" + }, + { + "description": "The total time spent performing garbage collection.", + "name": "flink.jvm.gc.collections.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_MarkSweep" + } + } + ], + "startTimeUnixNano": "1653681344750260000", + "timeUnixNano": 
"1653681344750308000" + }, + { + "asInt": "9", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_Scavenge" + } + } + ], + "startTimeUnixNano": "1653681344750260000", + "timeUnixNano": "1653681344750308000" + } + ], + "isMonotonic": true + }, + "unit": "ms" + }, + { + "description": "The total capacity of all buffers in the direct buffer pool.", + "name": "flink.jvm.memory.direct.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "19", + "startTimeUnixNano": "1653681344750260000", + "timeUnixNano": "1653681344750308000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the direct buffer pool.", + "name": "flink.jvm.memory.direct.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "18", + "startTimeUnixNano": "1653681344750260000", + "timeUnixNano": "1653681344750308000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.heap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "7", + "startTimeUnixNano": "1653681344750260000", + "timeUnixNano": "1653681344750308000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of heap memory that can be used for memory management.", + "name": "flink.jvm.memory.heap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "25", + "startTimeUnixNano": "1653681344750260000", + "timeUnixNano": "1653681344750308000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory currently used.", + "name": "flink.jvm.memory.heap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "26", + "startTimeUnixNano": "1653681344750260000", + "timeUnixNano": "1653681344750308000" + } + ] + }, + "unit": "By" + }, + { + "description": "The number of buffers in the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "1", + "startTimeUnixNano": "1653681344750260000", + "timeUnixNano": "1653681344750308000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "4", + "startTimeUnixNano": "1653681344750260000", + "timeUnixNano": "1653681344750308000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory guaranteed to be available to the JVM in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "8", + "startTimeUnixNano": "1653681344750260000", + "timeUnixNano": "1653681344750308000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of memory that can be used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "17", + "startTimeUnixNano": "1653681344750260000", + 
"timeUnixNano": "1653681344750308000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory currently used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "23", + "startTimeUnixNano": "1653681344750260000", + "timeUnixNano": "1653681344750308000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.nonheap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "15", + "startTimeUnixNano": "1653681344750260000", + "timeUnixNano": "1653681344750308000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of non-heap memory that can be used for memory management.", + "name": "flink.jvm.memory.nonheap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "13", + "startTimeUnixNano": "1653681344750260000", + "timeUnixNano": "1653681344750308000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory currently used.", + "name": "flink.jvm.memory.nonheap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "16", + "startTimeUnixNano": "1653681344750260000", + "timeUnixNano": "1653681344750308000" + } + ] + }, + "unit": "By" + }, + { + "description": "The total number of live threads.", + "name": "flink.jvm.threads.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "6", + "startTimeUnixNano": "1653681344750260000", + "timeUnixNano": "1653681344750308000" + } + ] + }, + "unit": "{threads}" + }, + { + "description": "The total amount of managed memory.", + "name": "flink.memory.managed.total", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "28", + "startTimeUnixNano": "1653681344750260000", + "timeUnixNano": "1653681344750308000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of managed memory currently used.", + "name": "flink.memory.managed.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "29", + "startTimeUnixNano": "1653681344750260000", + "timeUnixNano": "1653681344750308000" + } + ] + }, + "unit": "By" + } + ], + "scope": { + "name": "otelcol/flinkmetricsreceiver", + "version": "latest" + } + } + ] + } + ] +} diff --git a/receiver/flinkmetricsreceiver/testdata/integration/Dockerfile.flink-master b/receiver/flinkmetricsreceiver/testdata/integration/Dockerfile.flink-master new file mode 100644 index 0000000000000..0e1cbc5f38fb3 --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/integration/Dockerfile.flink-master @@ -0,0 +1,4 @@ +FROM bde2020/flink-master:1.14.2-hadoop3.2 + +COPY /setup.sh /setup.sh +RUN chmod +x setup.sh diff --git a/receiver/flinkmetricsreceiver/testdata/integration/Dockerfile.flink-worker b/receiver/flinkmetricsreceiver/testdata/integration/Dockerfile.flink-worker new file mode 100644 index 0000000000000..93647ca25da73 --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/integration/Dockerfile.flink-worker @@ -0,0 +1,3 @@ +FROM bde2020/flink-worker:1.14.2-hadoop3.2 + +ENV FLINK_MASTER_PORT_6123_TCP_ADDR=flink-master diff --git 
a/receiver/flinkmetricsreceiver/testdata/integration/expected.json b/receiver/flinkmetricsreceiver/testdata/integration/expected.json new file mode 100644 index 0000000000000..4a4a40fbdd867 --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/integration/expected.json @@ -0,0 +1,1242 @@ +{ + "resourceMetrics": [ + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "job-localhost" + } + }, + { + "key": "flink.resource.type", + "value": { + "stringValue": "jobmanager" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "description": "The total number of classes loaded since the start of the JVM.", + "name": "flink.jvm.class_loader.classes_loaded", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "9397", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ], + "isMonotonic": true + }, + "unit": "{classes}" + }, + { + "description": "The CPU usage of the JVM for a jobmanager or taskmanager.", + "gauge": { + "dataPoints": [ + { + "asDouble": 0.04701864985552929, + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "name": "flink.jvm.cpu.load", + "unit": "%" + }, + { + "description": "The CPU time used by the JVM for a jobmanager or taskmanager.", + "name": "flink.jvm.cpu.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "18810000000", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ], + "isMonotonic": true + }, + "unit": "ns" + }, + { + "description": "The total number of collections that have occurred.", + "name": "flink.jvm.gc.collections.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "2", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_MarkSweep" + } + } + ], + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + }, + { + "asInt": "2", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_Scavenge" + } + } + ], + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ], + "isMonotonic": true + }, + "unit": "{collections}" + }, + { + "description": "The total time spent performing garbage collection.", + "name": "flink.jvm.gc.collections.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "69", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_MarkSweep" + } + } + ], + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + }, + { + "asInt": "26", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "PS_Scavenge" + } + } + ], + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ], + "isMonotonic": true + }, + "unit": "ms" + }, + { + "description": "The total capacity of all buffers in the direct buffer pool.", + "name": "flink.jvm.memory.direct.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "538757", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the 
direct buffer pool.", + "name": "flink.jvm.memory.direct.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "538758", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.heap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "1029177344", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of heap memory that can be used for memory management.", + "name": "flink.jvm.memory.heap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "1029177344", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory currently used.", + "name": "flink.jvm.memory.heap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "223422512", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The number of buffers in the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory guaranteed to be available to the JVM in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "58720256", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of memory that can be used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "268435456", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory currently used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "54813552", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.nonheap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": 
"78839808", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of non-heap memory that can be used for memory management.", + "name": "flink.jvm.memory.nonheap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "780140544", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory currently used.", + "name": "flink.jvm.memory.nonheap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "74126168", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The total number of live threads.", + "name": "flink.jvm.threads.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "72", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "{threads}" + } + ], + "scope": { + "name": "otelcol/flinkmetricsreceiver", + "version": "latest" + } + } + ] + }, + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "taskmanager-localhost" + } + }, + { + "key": "flink.taskmanager.id", + "value": { + "stringValue": "taskmanagerID" + } + }, + { + "key": "flink.resource.type", + "value": { + "stringValue": "taskmanager" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "description": "The total number of classes loaded since the start of the JVM.", + "name": "flink.jvm.class_loader.classes_loaded", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "8968", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ], + "isMonotonic": true + }, + "unit": "{classes}" + }, + { + "description": "The CPU usage of the JVM for a jobmanager or taskmanager.", + "gauge": { + "dataPoints": [ + { + "asDouble": 0.055320834433588595, + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "name": "flink.jvm.cpu.load", + "unit": "%" + }, + { + "description": "The CPU time used by the JVM for a jobmanager or taskmanager.", + "name": "flink.jvm.cpu.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "16230000000", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ], + "isMonotonic": true + }, + "unit": "ns" + }, + { + "description": "The total number of collections that have occurred.", + "name": "flink.jvm.gc.collections.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "4", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Young_Generation" + } + } + ], + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + }, + { + "asInt": "0", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Old_Generation" + } + } + ], + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ], + "isMonotonic": true + }, + "unit": "{collections}" + }, + { + "description": "The total time spent performing 
garbage collection.", + "name": "flink.jvm.gc.collections.time", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Old_Generation" + } + } + ], + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + }, + { + "asInt": "77", + "attributes": [ + { + "key": "garbage_collector_name", + "value": { + "stringValue": "G1_Young_Generation" + } + } + ], + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ], + "isMonotonic": true + }, + "unit": "ms" + }, + { + "description": "The total capacity of all buffers in the direct buffer pool.", + "name": "flink.jvm.memory.direct.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "135546882", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the direct buffer pool.", + "name": "flink.jvm.memory.direct.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "135546883", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.heap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "536870912", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of heap memory that can be used for memory management.", + "name": "flink.jvm.memory.heap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "536870912", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of heap memory currently used.", + "name": "flink.jvm.memory.heap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "140509184", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The number of buffers in the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.total_capacity", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory used by the JVM for the mapped buffer pool.", + "name": "flink.jvm.memory.mapped.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory guaranteed to be available to the JVM in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "57360384", + 
"startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of memory that can be used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "268435456", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of memory currently used in the Metaspace memory pool.", + "name": "flink.jvm.memory.metaspace.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "54187976", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory guaranteed to be available to the JVM.", + "name": "flink.jvm.memory.nonheap.committed", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "76054528", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The maximum amount of non-heap memory that can be used for memory management.", + "name": "flink.jvm.memory.nonheap.max", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "780140544", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of non-heap memory currently used.", + "name": "flink.jvm.memory.nonheap.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "72345848", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The total number of live threads.", + "name": "flink.jvm.threads.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "57", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "{threads}" + }, + { + "description": "The total amount of managed memory.", + "name": "flink.memory.managed.total", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "536870920", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The amount of managed memory currently used.", + "name": "flink.memory.managed.used", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + } + ], + "scope": { + "name": "otelcol/flinkmetricsreceiver", + "version": "latest" + } + } + ] + }, + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "job-localhost" + } + }, + { + "key": "flink.job.name", + "value": { + "stringValue": "State machine job" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "description": "The number of checkpoints completed or failed.", + "name": "flink.job.checkpoint.count", + "sum": { + "aggregationTemporality": 
"AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "attributes": [ + { + "key": "checkpoint", + "value": { + "stringValue": "failed" + } + } + ], + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + }, + { + "asInt": "1", + "attributes": [ + { + "key": "checkpoint", + "value": { + "stringValue": "completed" + } + } + ], + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ], + "isMonotonic": true + }, + "unit": "{checkpoints}" + }, + { + "description": "The number of checkpoints in progress.", + "name": "flink.job.checkpoint.in_progress", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "{checkpoints}" + }, + { + "description": "The total size of the last checkpoint.", + "name": "flink.job.last_checkpoint.size", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "5197", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "By" + }, + { + "description": "The end to end duration of the last checkpoint.", + "gauge": { + "dataPoints": [ + { + "asInt": "168", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "name": "flink.job.last_checkpoint.time", + "unit": "ms" + }, + { + "description": "The total number of restarts since this job was submitted, including full restarts and fine-grained restarts.", + "name": "flink.job.restart.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ], + "isMonotonic": true + }, + "unit": "{restarts}" + } + ], + "scope": { + "name": "otelcol/flinkmetricsreceiver", + "version": "latest" + } + } + ] + }, + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "taskmanager-localhost" + } + }, + { + "key": "flink.taskmanager.id", + "value": { + "stringValue": "taskmanagerID" + } + }, + { + "key": "flink.job.name", + "value": { + "stringValue": "State machine job" + } + }, + { + "key": "flink.task.name", + "value": { + "stringValue": "Source: Custom Source" + } + }, + { + "key": "flink.subtask.index", + "value": { + "stringValue": "0" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "description": "The number of records an operator has.", + "name": "flink.operator.record.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "699", + "attributes": [ + { + "key": "operator_name", + "value": { + "stringValue": "Source__Custom_Source" + } + }, + { + "key": "record", + "value": { + "stringValue": "out" + } + } + ], + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + }, + { + "asInt": "0", + "attributes": [ + { + "key": "operator_name", + "value": { + "stringValue": "Source__Custom_Source" + } + }, + { + "key": "record", + "value": { + "stringValue": "in" + } + } + ], + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ], + "isMonotonic": true + }, + "unit": "{records}" + }, + { + "description": "The last watermark this operator has emitted.", + "name": "flink.operator.watermark.output", + 
"sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "-9223372036854775808", + "attributes": [ + { + "key": "operator_name", + "value": { + "stringValue": "Source__Custom_Source" + } + } + ], + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "ms" + }, + { + "description": "The number of records a task has.", + "name": "flink.task.record.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "699", + "attributes": [ + { + "key": "record", + "value": { + "stringValue": "out" + } + } + ], + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + }, + { + "asInt": "0", + "attributes": [ + { + "key": "record", + "value": { + "stringValue": "in" + } + } + ], + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ], + "isMonotonic": true + }, + "unit": "{records}" + } + ], + "scope": { + "name": "otelcol/flinkmetricsreceiver", + "version": "latest" + } + } + ] + }, + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "taskmanager-localhost" + } + }, + { + "key": "flink.taskmanager.id", + "value": { + "stringValue": "taskmanagerID" + } + }, + { + "key": "flink.job.name", + "value": { + "stringValue": "State machine job" + } + }, + { + "key": "flink.task.name", + "value": { + "stringValue": "Flat Map -\u003e Sink: Print to Std. Out" + } + }, + { + "key": "flink.subtask.index", + "value": { + "stringValue": "0" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "description": "The number of records an operator has.", + "name": "flink.operator.record.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "663", + "attributes": [ + { + "key": "operator_name", + "value": { + "stringValue": "Flat_Map" + } + }, + { + "key": "record", + "value": { + "stringValue": "in" + } + } + ], + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + }, + { + "asInt": "0", + "attributes": [ + { + "key": "operator_name", + "value": { + "stringValue": "Sink__Print_to_Std__Out" + } + }, + { + "key": "record", + "value": { + "stringValue": "out" + } + } + ], + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + }, + { + "asInt": "0", + "attributes": [ + { + "key": "operator_name", + "value": { + "stringValue": "Sink__Print_to_Std__Out" + } + }, + { + "key": "record", + "value": { + "stringValue": "in" + } + } + ], + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + }, + { + "asInt": "0", + "attributes": [ + { + "key": "operator_name", + "value": { + "stringValue": "Flat_Map" + } + }, + { + "key": "record", + "value": { + "stringValue": "out" + } + } + ], + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ], + "isMonotonic": true + }, + "unit": "{records}" + }, + { + "description": "The last watermark this operator has emitted.", + "name": "flink.operator.watermark.output", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "-9223372036854775808", + "attributes": [ + { + "key": "operator_name", + "value": { + "stringValue": "Flat_Map" + } + } + ], + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + }, + { + "asInt": 
"-9223372036854775808", + "attributes": [ + { + "key": "operator_name", + "value": { + "stringValue": "Sink__Print_to_Std__Out" + } + } + ], + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ] + }, + "unit": "ms" + }, + { + "description": "The number of records a task has.", + "name": "flink.task.record.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "attributes": [ + { + "key": "record", + "value": { + "stringValue": "out" + } + } + ], + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + }, + { + "asInt": "663", + "attributes": [ + { + "key": "record", + "value": { + "stringValue": "in" + } + } + ], + "startTimeUnixNano": "1653681391125596000", + "timeUnixNano": "1653681391226291000" + } + ], + "isMonotonic": true + }, + "unit": "{records}" + } + ], + "scope": { + "name": "otelcol/flinkmetricsreceiver", + "version": "latest" + } + } + ] + } + ] +} diff --git a/receiver/flinkmetricsreceiver/testdata/integration/setup.sh b/receiver/flinkmetricsreceiver/testdata/integration/setup.sh new file mode 100755 index 0000000000000..d1989d5f14e00 --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/integration/setup.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +set -e + +./usr/local/flink/bin/flink run --detached /usr/local/flink/examples/streaming/StateMachineExample.jar + +exit 0 diff --git a/receiver/flinkmetricsreceiver/testdata/mockresponses/mock_jobmanager_metrics.json b/receiver/flinkmetricsreceiver/testdata/mockresponses/mock_jobmanager_metrics.json new file mode 100644 index 0000000000000..bed30f4a5d852 --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/mockresponses/mock_jobmanager_metrics.json @@ -0,0 +1,122 @@ +[ + { + "id": "Status.JVM.GarbageCollector.PS_MarkSweep.Time", + "value": "0" + }, + { + "id": "Status.JVM.Memory.Mapped.TotalCapacity", + "value": "1" + }, + { + "id": "taskSlotsAvailable", + "value": "2" + }, + { + "id": "taskSlotsTotal", + "value": "3" + }, + { + "id": "Status.JVM.Memory.Mapped.MemoryUsed", + "value": "4" + }, + { + "id": "Status.JVM.CPU.Time", + "value": "5" + }, + { + "id": "Status.JVM.Threads.Count", + "value": "6" + }, + { + "id": "Status.JVM.Memory.Heap.Committed", + "value": "7" + }, + { + "id": "Status.JVM.Memory.Metaspace.Committed", + "value": "8" + }, + { + "id": "Status.JVM.GarbageCollector.PS_Scavenge.Time", + "value": "9" + }, + { + "id": "Status.JVM.GarbageCollector.PS_MarkSweep.Count", + "value": "10" + }, + { + "id": "Status.JVM.Memory.Direct.Count", + "value": "11" + }, + { + "id": "Status.JVM.GarbageCollector.PS_Scavenge.Count", + "value": "12" + }, + { + "id": "Status.JVM.Memory.NonHeap.Max", + "value": "13" + }, + { + "id": "numRegisteredTaskManagers", + "value": "14" + }, + { + "id": "Status.JVM.Memory.NonHeap.Committed", + "value": "15" + }, + { + "id": "Status.JVM.Memory.NonHeap.Used", + "value": "16" + }, + { + "id": "Status.JVM.Memory.Metaspace.Max", + "value": "17" + }, + { + "id": "Status.JVM.Memory.Direct.MemoryUsed", + "value": "18" + }, + { + "id": "Status.JVM.Memory.Direct.TotalCapacity", + "value": "19" + }, + { + "id": "numRunningJobs", + "value": "20" + }, + { + "id": "Status.JVM.ClassLoader.ClassesLoaded", + "value": "21" + }, + { + "id": "Status.JVM.Memory.Mapped.Count", + "value": "22" + }, + { + "id": "Status.JVM.Memory.Metaspace.Used", + "value": "23" + }, + { + "id": "Status.JVM.CPU.Load", + "value": "0.24" + }, + { + "id": "Status.JVM.Memory.Heap.Max", + 
"value": "25" + }, + { + "id": "Status.JVM.Memory.Heap.Used", + "value": "26" + }, + { + "id": "Status.JVM.ClassLoader.ClassesUnloaded", + "value": "27" + }, + { + "id": "Status.Flink.Memory.Managed.Total", + "value": "28" + }, + { + "id": "Status.Flink.Memory.Managed.Used", + "value": "29" + } +] diff --git a/receiver/flinkmetricsreceiver/testdata/mockresponses/mock_jobs_metrics.json b/receiver/flinkmetricsreceiver/testdata/mockresponses/mock_jobs_metrics.json new file mode 100644 index 0000000000000..907f12679d942 --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/mockresponses/mock_jobs_metrics.json @@ -0,0 +1,26 @@ +[ + { + "id": "numberOfFailedCheckpoints", + "value": "0" + }, + { + "id": "lastCheckpointSize", + "value": "1" + }, + { + "id": "numberOfInProgressCheckpoints", + "value": "2" + }, + { + "id": "numberOfCompletedCheckpoints", + "value": "3" + }, + { + "id": "lastCheckpointDuration", + "value": "4" + }, + { + "id": "numRestarts", + "value": "5" + } +] diff --git a/receiver/flinkmetricsreceiver/testdata/mockresponses/mock_subtask_metrics.json b/receiver/flinkmetricsreceiver/testdata/mockresponses/mock_subtask_metrics.json new file mode 100644 index 0000000000000..92e59426827fe --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/mockresponses/mock_subtask_metrics.json @@ -0,0 +1,30 @@ +[ + { + "id": "Source__Custom_Source.numRecordsOut", + "value": "0" + }, + { + "id": "numRecordsOut", + "value": "1" + }, + { + "id": "numLateRecordsDropped", + "value": "2" + }, + { + "id": "Source__Custom_Source.currentOutputWatermark", + "value": "3" + }, + { + "id": "numRecordsIn", + "value": "4" + }, + { + "id": "Source__Custom_Source.numRecordsIn", + "value": "5" + }, + { + "id": "Source__Custom_Source.numLateRecordsDropped", + "value": "6" + } +] diff --git a/receiver/flinkmetricsreceiver/testdata/mockresponses/mock_taskmanager_metrics.json b/receiver/flinkmetricsreceiver/testdata/mockresponses/mock_taskmanager_metrics.json new file mode 100644 index 0000000000000..903b56c23e6a5 --- /dev/null +++ b/receiver/flinkmetricsreceiver/testdata/mockresponses/mock_taskmanager_metrics.json @@ -0,0 +1,138 @@ +[ + { + "id": "Status.JVM.Memory.Mapped.TotalCapacity", + "value": "0" + }, + { + "id": "Status.Network.AvailableMemorySegments", + "value": "1" + }, + { + "id": "Status.Network.TotalMemorySegments", + "value": "2" + }, + { + "id": "Status.JVM.Memory.Mapped.MemoryUsed", + "value": "3" + }, + { + "id": "Status.JVM.CPU.Time", + "value": "4" + }, + { + "id": "Status.Flink.Memory.Managed.Total", + "value": "5" + }, + { + "id": "Status.JVM.GarbageCollector.G1_Young_Generation.Count", + "value": "6" + }, + { + "id": "Status.JVM.Threads.Count", + "value": "7" + }, + { + "id": "Status.Shuffle.Netty.UsedMemory", + "value": "8" + }, + { + "id": "Status.JVM.Memory.Heap.Committed", + "value": "9" + }, + { + "id": "Status.Shuffle.Netty.TotalMemory", + "value": "10" + }, + { + "id": "Status.JVM.Memory.Metaspace.Committed", + "value": "11" + }, + { + "id": "Status.JVM.Memory.Direct.Count", + "value": "12" + }, + { + "id": "Status.Shuffle.Netty.AvailableMemorySegments", + "value": "13" + }, + { + "id": "Status.JVM.Memory.NonHeap.Max", + "value": "14" + }, + { + "id": "Status.Shuffle.Netty.TotalMemorySegments", + "value": "15" + }, + { + "id": "Status.JVM.Memory.NonHeap.Committed", + "value": "16" + }, + { + "id": "Status.JVM.Memory.NonHeap.Used", + "value": "17" + }, + { + "id": "Status.JVM.Memory.Metaspace.Max", + "value": "18" + }, + { + "id": 
"Status.JVM.GarbageCollector.G1_Old_Generation.Count", + "value": "19" + }, + { + "id": "Status.JVM.Memory.Direct.MemoryUsed", + "value": "20" + }, + { + "id": "Status.JVM.GarbageCollector.G1_Old_Generation.Time", + "value": "21" + }, + { + "id": "Status.JVM.Memory.Direct.TotalCapacity", + "value": "22" + }, + { + "id": "Status.Shuffle.Netty.UsedMemorySegments", + "value": "23" + }, + { + "id": "Status.JVM.ClassLoader.ClassesLoaded", + "value": "24" + }, + { + "id": "Status.JVM.Memory.Mapped.Count", + "value": "25" + }, + { + "id": "Status.Flink.Memory.Managed.Used", + "value": "26" + }, + { + "id": "Status.JVM.Memory.Metaspace.Used", + "value": "27" + }, + { + "id": "Status.JVM.CPU.Load", + "value": "0.28" + }, + { + "id": "Status.JVM.Memory.Heap.Max", + "value": "29" + }, + { + "id": "Status.JVM.Memory.Heap.Used", + "value": "30" + }, + { + "id": "Status.JVM.ClassLoader.ClassesUnloaded", + "value": "31" + }, + { + "id": "Status.JVM.GarbageCollector.G1_Young_Generation.Time", + "value": "32" + }, + { + "id": "Status.Shuffle.Netty.AvailableMemory", + "value": "33" + } +] diff --git a/versions.yaml b/versions.yaml index 2e6c35c482071..75a3469c3f6dd 100644 --- a/versions.yaml +++ b/versions.yaml @@ -148,6 +148,7 @@ module-sets: - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/elasticsearchreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/expvarreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver + - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudpubsubreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver