diff --git a/CHANGELOG.md b/CHANGELOG.md index 8c1006b3c648e..9acd10ffc62f1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,8 @@ - `pkg/translator/prometheusremotewrite`: Allow to disable sanitize metric labels (#8270) - `basicauthextension`: Implement `configauth.ClientAuthenticator` so that the extension can also be used as HTTP client basic authenticator.(#8847) +- `cmd/mdatagen`: Update the generated recording functions to include a simple parse step, so that string input values are handled consistently and code duplication across receivers is reduced (#7574) + ### 🧰 Bug fixes 🧰 - `fluentforwardreceiver`: Release port on shutdown (#9111) diff --git a/cmd/mdatagen/main.go b/cmd/mdatagen/main.go index 1a7be8d7b609d..2f7406ac250eb 100644 --- a/cmd/mdatagen/main.go +++ b/cmd/mdatagen/main.go @@ -86,6 +86,14 @@ func generateMetrics(ymlDir string, thisDir string, md metadata, useExpGen bool) "publicVar": func(s string) (string, error) { return formatIdentifier(s, true) }, + "parseImportsRequired": func(metrics map[metricName]metric) bool { + for _, m := range metrics { + if m.Data().HasMetricInputType() { + return true + } + } + return false + }, }).ParseFiles(path.Join(thisDir, tmplFile))) buf := bytes.Buffer{} diff --git a/cmd/mdatagen/metricdata.go b/cmd/mdatagen/metricdata.go index 47d0be608c6b1..670e33f7b055b 100644 --- a/cmd/mdatagen/metricdata.go +++ b/cmd/mdatagen/metricdata.go @@ -32,6 +32,7 @@ type MetricData interface { HasMonotonic() bool HasAggregated() bool HasMetricValueType() bool + HasMetricInputType() bool } // Aggregated defines a metric aggregation type. @@ -59,6 +60,17 @@ type Mono struct { Monotonic bool `mapstructure:"monotonic"` } +// MetricInputType defines the metric input value type. +type MetricInputType struct { + // InputType is the type the metric needs to be parsed from, options are "string". + InputType string `mapstructure:"input_type" validate:"omitempty,oneof=string"` +} + +// String returns the name of the metric input type. +func (mit MetricInputType) String() string { + return mit.InputType +} + // MetricValueType defines the metric number type. type MetricValueType struct { // ValueType is type of the metric number, options are "double", "int".
@@ -97,6 +109,7 @@ func (mvt MetricValueType) BasicType() string { type gauge struct { MetricValueType `mapstructure:"value_type"` + MetricInputType `mapstructure:",squash"` } func (d gauge) Type() string { @@ -115,10 +128,15 @@ func (d gauge) HasMetricValueType() bool { return true } +func (d gauge) HasMetricInputType() bool { + return d.InputType != "" +} + type sum struct { Aggregated `mapstructure:",squash"` Mono `mapstructure:",squash"` MetricValueType `mapstructure:"value_type"` + MetricInputType `mapstructure:",squash"` } func (d sum) Type() string { @@ -137,6 +155,10 @@ func (d sum) HasMetricValueType() bool { return true } +func (d sum) HasMetricInputType() bool { + return d.InputType != "" +} + type histogram struct { Aggregated `mapstructure:",squash"` } @@ -156,3 +178,7 @@ func (d histogram) HasAggregated() bool { func (d histogram) HasMetricValueType() bool { return false } + +func (d histogram) HasMetricInputType() bool { + return false +} diff --git a/cmd/mdatagen/metrics_v2.tmpl b/cmd/mdatagen/metrics_v2.tmpl index 7fbf69c16cec6..3e3415cb65832 100644 --- a/cmd/mdatagen/metrics_v2.tmpl +++ b/cmd/mdatagen/metrics_v2.tmpl @@ -3,6 +3,10 @@ package {{ .Package }} import ( + {{- if .Metrics | parseImportsRequired }} + "strconv" + "fmt" + {{- end }} "time" "go.opentelemetry.io/collector/pdata/pcommon" @@ -198,11 +202,39 @@ func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics { {{ range $name, $metric := .Metrics -}} // Record{{ $name.Render }}DataPoint adds a data point to {{ $name }} metric. func (mb *MetricsBuilder) Record{{ $name.Render }}DataPoint(ts pcommon.Timestamp - {{- if $metric.Data.HasMetricValueType }}, val {{ $metric.Data.MetricValueType.BasicType }}{{ end }} - {{- range $metric.Attributes -}} , {{ .RenderUnexported }}AttributeValue string{{ end }}) { + {{- if $metric.Data.HasMetricInputType }}, val {{ $metric.Data.MetricInputType.String }} + {{- else }} + {{- if $metric.Data.HasMetricValueType }}, val {{ $metric.Data.MetricValueType.BasicType }}{{- end }} + {{- end -}} + {{- range $metric.Attributes -}} , {{ .RenderUnexported }}AttributeValue string{{ end }}) + {{- if $metric.Data.HasMetricInputType }} error{{ end }} { + {{- if $metric.Data.HasMetricInputType }} + {{- if $metric.Data.HasMetricValueType }} + {{- if eq $metric.Data.MetricValueType.BasicType "float64" }} + if f, err := strconv.ParseFloat(val, 64); err != nil { + return fmt.Errorf("failed to parse float for {{ $name.Render }}, value was %s: %w", val, err) + } else { + mb.metric{{ $name.Render }}.recordDataPoint(mb.startTime, ts + {{- if $metric.Data.HasMetricValueType }}, f {{ end }} + {{- range $metric.Attributes -}} , {{ .RenderUnexported }}AttributeValue{{ end }}) + } + {{- end }} + {{- if eq $metric.Data.MetricValueType.BasicType "int64" }} + if i, err := strconv.ParseInt(val, 10, 64); err != nil { + return fmt.Errorf("failed to parse int for {{ $name.Render }}, value was %s: %w", val, err) + } else { + mb.metric{{ $name.Render }}.recordDataPoint(mb.startTime, ts + {{- if $metric.Data.HasMetricValueType }}, i {{ end }} + {{- range $metric.Attributes -}} , {{ .RenderUnexported }}AttributeValue{{ end }}) + } + {{- end }} + return nil + {{- end }} + {{- else }} mb.metric{{ $name.Render }}.recordDataPoint(mb.startTime, ts {{- if $metric.Data.HasMetricValueType }}, val {{ end }} {{- range $metric.Attributes -}} , {{ .RenderUnexported }}AttributeValue{{ end }}) + {{- end }} } {{ end }} diff --git a/receiver/apachereceiver/internal/metadata/generated_metrics_v2.go 
b/receiver/apachereceiver/internal/metadata/generated_metrics_v2.go index d1ef628db7dc0..377d5f562c241 100644 --- a/receiver/apachereceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/apachereceiver/internal/metadata/generated_metrics_v2.go @@ -3,6 +3,8 @@ package metadata import ( + "fmt" + "strconv" "time" "go.opentelemetry.io/collector/pdata/pcommon" @@ -458,13 +460,23 @@ func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics { } // RecordApacheCurrentConnectionsDataPoint adds a data point to apache.current_connections metric. -func (mb *MetricsBuilder) RecordApacheCurrentConnectionsDataPoint(ts pcommon.Timestamp, val int64, serverNameAttributeValue string) { - mb.metricApacheCurrentConnections.recordDataPoint(mb.startTime, ts, val, serverNameAttributeValue) +func (mb *MetricsBuilder) RecordApacheCurrentConnectionsDataPoint(ts pcommon.Timestamp, val string, serverNameAttributeValue string) error { + if i, err := strconv.ParseInt(val, 10, 64); err != nil { + return fmt.Errorf("failed to parse int for ApacheCurrentConnections, value was %s: %w", val, err) + } else { + mb.metricApacheCurrentConnections.recordDataPoint(mb.startTime, ts, i, serverNameAttributeValue) + } + return nil } // RecordApacheRequestsDataPoint adds a data point to apache.requests metric. -func (mb *MetricsBuilder) RecordApacheRequestsDataPoint(ts pcommon.Timestamp, val int64, serverNameAttributeValue string) { - mb.metricApacheRequests.recordDataPoint(mb.startTime, ts, val, serverNameAttributeValue) +func (mb *MetricsBuilder) RecordApacheRequestsDataPoint(ts pcommon.Timestamp, val string, serverNameAttributeValue string) error { + if i, err := strconv.ParseInt(val, 10, 64); err != nil { + return fmt.Errorf("failed to parse int for ApacheRequests, value was %s: %w", val, err) + } else { + mb.metricApacheRequests.recordDataPoint(mb.startTime, ts, i, serverNameAttributeValue) + } + return nil } // RecordApacheScoreboardDataPoint adds a data point to apache.scoreboard metric. @@ -478,13 +490,23 @@ func (mb *MetricsBuilder) RecordApacheTrafficDataPoint(ts pcommon.Timestamp, val } // RecordApacheUptimeDataPoint adds a data point to apache.uptime metric. -func (mb *MetricsBuilder) RecordApacheUptimeDataPoint(ts pcommon.Timestamp, val int64, serverNameAttributeValue string) { - mb.metricApacheUptime.recordDataPoint(mb.startTime, ts, val, serverNameAttributeValue) +func (mb *MetricsBuilder) RecordApacheUptimeDataPoint(ts pcommon.Timestamp, val string, serverNameAttributeValue string) error { + if i, err := strconv.ParseInt(val, 10, 64); err != nil { + return fmt.Errorf("failed to parse int for ApacheUptime, value was %s: %w", val, err) + } else { + mb.metricApacheUptime.recordDataPoint(mb.startTime, ts, i, serverNameAttributeValue) + } + return nil } // RecordApacheWorkersDataPoint adds a data point to apache.workers metric. 
-func (mb *MetricsBuilder) RecordApacheWorkersDataPoint(ts pcommon.Timestamp, val int64, serverNameAttributeValue string, workersStateAttributeValue string) { - mb.metricApacheWorkers.recordDataPoint(mb.startTime, ts, val, serverNameAttributeValue, workersStateAttributeValue) +func (mb *MetricsBuilder) RecordApacheWorkersDataPoint(ts pcommon.Timestamp, val string, serverNameAttributeValue string, workersStateAttributeValue string) error { + if i, err := strconv.ParseInt(val, 10, 64); err != nil { + return fmt.Errorf("failed to parse int for ApacheWorkers, value was %s: %w", val, err) + } else { + mb.metricApacheWorkers.recordDataPoint(mb.startTime, ts, i, serverNameAttributeValue, workersStateAttributeValue) + } + return nil } // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, diff --git a/receiver/apachereceiver/metadata.yaml b/receiver/apachereceiver/metadata.yaml index 181e6184fd752..d4d2c079dd9ba 100644 --- a/receiver/apachereceiver/metadata.yaml +++ b/receiver/apachereceiver/metadata.yaml @@ -32,6 +32,7 @@ metrics: unit: s sum: value_type: int + input_type: string monotonic: true aggregation: cumulative attributes: [ server_name ] @@ -41,6 +42,7 @@ metrics: unit: connections sum: value_type: int + input_type: string monotonic: false aggregation: cumulative attributes: [ server_name ] @@ -50,6 +52,7 @@ metrics: unit: connections sum: value_type: int + input_type: string monotonic: false aggregation: cumulative attributes: [ server_name, workers_state] @@ -59,6 +62,7 @@ metrics: unit: 1 sum: value_type: int + input_type: string monotonic: true aggregation: cumulative attributes: [ server_name ] diff --git a/receiver/apachereceiver/scraper.go b/receiver/apachereceiver/scraper.go index 7e6dbb6e9c687..822db09091538 100644 --- a/receiver/apachereceiver/scraper.go +++ b/receiver/apachereceiver/scraper.go @@ -26,6 +26,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/receiver/scrapererror" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachereceiver/internal/metadata" @@ -69,31 +70,25 @@ func (r *apacheScraper) scrape(context.Context) (pmetric.Metrics, error) { return pmetric.Metrics{}, err } + errors := &scrapererror.ScrapeErrors{} now := pcommon.NewTimestampFromTime(time.Now()) for metricKey, metricValue := range parseStats(stats) { switch metricKey { case "ServerUptimeSeconds": - if i, ok := r.parseInt(metricKey, metricValue); ok { - r.mb.RecordApacheUptimeDataPoint(now, i, r.cfg.serverName) - } + addPartialIfError(errors, r.mb.RecordApacheUptimeDataPoint(now, metricValue, r.cfg.serverName)) case "ConnsTotal": - if i, ok := r.parseInt(metricKey, metricValue); ok { - r.mb.RecordApacheCurrentConnectionsDataPoint(now, i, r.cfg.serverName) - } + addPartialIfError(errors, r.mb.RecordApacheCurrentConnectionsDataPoint(now, metricValue, r.cfg.serverName)) case "BusyWorkers": - if i, ok := r.parseInt(metricKey, metricValue); ok { - r.mb.RecordApacheWorkersDataPoint(now, i, r.cfg.serverName, "busy") - } + addPartialIfError(errors, r.mb.RecordApacheWorkersDataPoint(now, metricValue, r.cfg.serverName, "busy")) case "IdleWorkers": - if i, ok := r.parseInt(metricKey, metricValue); ok { - r.mb.RecordApacheWorkersDataPoint(now, i, r.cfg.serverName, "idle") - } + addPartialIfError(errors, r.mb.RecordApacheWorkersDataPoint(now, metricValue, r.cfg.serverName, "idle")) case "Total Accesses":
- if i, ok := r.parseInt(metricKey, metricValue); ok { - r.mb.RecordApacheRequestsDataPoint(now, i, r.cfg.serverName) - } + addPartialIfError(errors, r.mb.RecordApacheRequestsDataPoint(now, metricValue, r.cfg.serverName)) case "Total kBytes": - if i, ok := r.parseInt(metricKey, metricValue); ok { + i, err := strconv.ParseInt(metricValue, 10, 64) + if err != nil { + errors.AddPartial(1, err) + } else { r.mb.RecordApacheTrafficDataPoint(now, kbytesToBytes(i), r.cfg.serverName) } case "Scoreboard": @@ -104,7 +99,13 @@ func (r *apacheScraper) scrape(context.Context) (pmetric.Metrics, error) { } } - return r.mb.Emit(), nil + return r.mb.Emit(), errors.Combine() +} + +func addPartialIfError(errors *scrapererror.ScrapeErrors, err error) { + if err != nil { + errors.AddPartial(1, err) + } } // GetStats collects metric stats by making a get request at an endpoint. @@ -138,25 +139,6 @@ func parseStats(resp string) map[string]string { return metrics } -// parseInt converts string to int64. -func (r *apacheScraper) parseInt(key, value string) (int64, bool) { - i, err := strconv.ParseInt(value, 10, 64) - if err != nil { - r.logInvalid("int", key, value) - return 0, false - } - return i, true -} - -func (r *apacheScraper) logInvalid(expectedType, key, value string) { - r.settings.Logger.Info( - "invalid value", - zap.String("expectedType", expectedType), - zap.String("key", key), - zap.String("value", value), - ) -} - type scoreboardCountsByLabel map[string]int64 // parseScoreboard quantifies the symbolic mapping of the scoreboard. diff --git a/receiver/mysqlreceiver/internal/metadata/generated_metrics_v2.go b/receiver/mysqlreceiver/internal/metadata/generated_metrics_v2.go index 8d88cc1642676..71d046e46d0c1 100644 --- a/receiver/mysqlreceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/mysqlreceiver/internal/metadata/generated_metrics_v2.go @@ -3,6 +3,8 @@ package metadata import ( + "fmt" + "strconv" "time" "go.opentelemetry.io/collector/pdata/pcommon" @@ -1117,23 +1119,43 @@ func (mb *MetricsBuilder) RecordMysqlBufferPoolDataPagesDataPoint(ts pcommon.Tim } // RecordMysqlBufferPoolLimitDataPoint adds a data point to mysql.buffer_pool.limit metric. -func (mb *MetricsBuilder) RecordMysqlBufferPoolLimitDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricMysqlBufferPoolLimit.recordDataPoint(mb.startTime, ts, val) +func (mb *MetricsBuilder) RecordMysqlBufferPoolLimitDataPoint(ts pcommon.Timestamp, val string) error { + if i, err := strconv.ParseInt(val, 10, 64); err != nil { + return fmt.Errorf("failed to parse int for MysqlBufferPoolLimit, value was %s: %w", val, err) + } else { + mb.metricMysqlBufferPoolLimit.recordDataPoint(mb.startTime, ts, i) + } + return nil } // RecordMysqlBufferPoolOperationsDataPoint adds a data point to mysql.buffer_pool.operations metric.
-func (mb *MetricsBuilder) RecordMysqlBufferPoolOperationsDataPoint(ts pcommon.Timestamp, val int64, bufferPoolOperationsAttributeValue string) { - mb.metricMysqlBufferPoolOperations.recordDataPoint(mb.startTime, ts, val, bufferPoolOperationsAttributeValue) +func (mb *MetricsBuilder) RecordMysqlBufferPoolOperationsDataPoint(ts pcommon.Timestamp, val string, bufferPoolOperationsAttributeValue string) error { + if i, err := strconv.ParseInt(val, 10, 64); err != nil { + return fmt.Errorf("failed to parse int for MysqlBufferPoolOperations, value was %s: %w", val, err) + } else { + mb.metricMysqlBufferPoolOperations.recordDataPoint(mb.startTime, ts, i, bufferPoolOperationsAttributeValue) + } + return nil } // RecordMysqlBufferPoolPageFlushesDataPoint adds a data point to mysql.buffer_pool.page_flushes metric. -func (mb *MetricsBuilder) RecordMysqlBufferPoolPageFlushesDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricMysqlBufferPoolPageFlushes.recordDataPoint(mb.startTime, ts, val) +func (mb *MetricsBuilder) RecordMysqlBufferPoolPageFlushesDataPoint(ts pcommon.Timestamp, val string) error { + if i, err := strconv.ParseInt(val, 10, 64); err != nil { + return fmt.Errorf("failed to parse int for MysqlBufferPoolPageFlushes, value was %s: %w", val, err) + } else { + mb.metricMysqlBufferPoolPageFlushes.recordDataPoint(mb.startTime, ts, i) + } + return nil } // RecordMysqlBufferPoolPagesDataPoint adds a data point to mysql.buffer_pool.pages metric. -func (mb *MetricsBuilder) RecordMysqlBufferPoolPagesDataPoint(ts pcommon.Timestamp, val int64, bufferPoolPagesAttributeValue string) { - mb.metricMysqlBufferPoolPages.recordDataPoint(mb.startTime, ts, val, bufferPoolPagesAttributeValue) +func (mb *MetricsBuilder) RecordMysqlBufferPoolPagesDataPoint(ts pcommon.Timestamp, val string, bufferPoolPagesAttributeValue string) error { + if i, err := strconv.ParseInt(val, 10, 64); err != nil { + return fmt.Errorf("failed to parse int for MysqlBufferPoolPages, value was %s: %w", val, err) + } else { + mb.metricMysqlBufferPoolPages.recordDataPoint(mb.startTime, ts, i, bufferPoolPagesAttributeValue) + } + return nil } // RecordMysqlBufferPoolUsageDataPoint adds a data point to mysql.buffer_pool.usage metric. @@ -1142,58 +1164,113 @@ func (mb *MetricsBuilder) RecordMysqlBufferPoolUsageDataPoint(ts pcommon.Timesta } // RecordMysqlCommandsDataPoint adds a data point to mysql.commands metric. -func (mb *MetricsBuilder) RecordMysqlCommandsDataPoint(ts pcommon.Timestamp, val int64, commandAttributeValue string) { - mb.metricMysqlCommands.recordDataPoint(mb.startTime, ts, val, commandAttributeValue) +func (mb *MetricsBuilder) RecordMysqlCommandsDataPoint(ts pcommon.Timestamp, val string, commandAttributeValue string) error { + if i, err := strconv.ParseInt(val, 10, 64); err != nil { + return fmt.Errorf("failed to parse int for MysqlCommands, value was %s: %w", val, err) + } else { + mb.metricMysqlCommands.recordDataPoint(mb.startTime, ts, i, commandAttributeValue) + } + return nil } // RecordMysqlDoubleWritesDataPoint adds a data point to mysql.double_writes metric. 
-func (mb *MetricsBuilder) RecordMysqlDoubleWritesDataPoint(ts pcommon.Timestamp, val int64, doubleWritesAttributeValue string) { - mb.metricMysqlDoubleWrites.recordDataPoint(mb.startTime, ts, val, doubleWritesAttributeValue) +func (mb *MetricsBuilder) RecordMysqlDoubleWritesDataPoint(ts pcommon.Timestamp, val string, doubleWritesAttributeValue string) error { + if i, err := strconv.ParseInt(val, 10, 64); err != nil { + return fmt.Errorf("failed to parse int for MysqlDoubleWrites, value was %s: %w", val, err) + } else { + mb.metricMysqlDoubleWrites.recordDataPoint(mb.startTime, ts, i, doubleWritesAttributeValue) + } + return nil } // RecordMysqlHandlersDataPoint adds a data point to mysql.handlers metric. -func (mb *MetricsBuilder) RecordMysqlHandlersDataPoint(ts pcommon.Timestamp, val int64, handlerAttributeValue string) { - mb.metricMysqlHandlers.recordDataPoint(mb.startTime, ts, val, handlerAttributeValue) +func (mb *MetricsBuilder) RecordMysqlHandlersDataPoint(ts pcommon.Timestamp, val string, handlerAttributeValue string) error { + if i, err := strconv.ParseInt(val, 10, 64); err != nil { + return fmt.Errorf("failed to parse int for MysqlHandlers, value was %s: %w", val, err) + } else { + mb.metricMysqlHandlers.recordDataPoint(mb.startTime, ts, i, handlerAttributeValue) + } + return nil } // RecordMysqlLocksDataPoint adds a data point to mysql.locks metric. -func (mb *MetricsBuilder) RecordMysqlLocksDataPoint(ts pcommon.Timestamp, val int64, locksAttributeValue string) { - mb.metricMysqlLocks.recordDataPoint(mb.startTime, ts, val, locksAttributeValue) +func (mb *MetricsBuilder) RecordMysqlLocksDataPoint(ts pcommon.Timestamp, val string, locksAttributeValue string) error { + if i, err := strconv.ParseInt(val, 10, 64); err != nil { + return fmt.Errorf("failed to parse int for MysqlLocks, value was %s: %w", val, err) + } else { + mb.metricMysqlLocks.recordDataPoint(mb.startTime, ts, i, locksAttributeValue) + } + return nil } // RecordMysqlLogOperationsDataPoint adds a data point to mysql.log_operations metric. -func (mb *MetricsBuilder) RecordMysqlLogOperationsDataPoint(ts pcommon.Timestamp, val int64, logOperationsAttributeValue string) { - mb.metricMysqlLogOperations.recordDataPoint(mb.startTime, ts, val, logOperationsAttributeValue) +func (mb *MetricsBuilder) RecordMysqlLogOperationsDataPoint(ts pcommon.Timestamp, val string, logOperationsAttributeValue string) error { + if i, err := strconv.ParseInt(val, 10, 64); err != nil { + return fmt.Errorf("failed to parse int for MysqlLogOperations, value was %s: %w", val, err) + } else { + mb.metricMysqlLogOperations.recordDataPoint(mb.startTime, ts, i, logOperationsAttributeValue) + } + return nil } // RecordMysqlOperationsDataPoint adds a data point to mysql.operations metric. -func (mb *MetricsBuilder) RecordMysqlOperationsDataPoint(ts pcommon.Timestamp, val int64, operationsAttributeValue string) { - mb.metricMysqlOperations.recordDataPoint(mb.startTime, ts, val, operationsAttributeValue) +func (mb *MetricsBuilder) RecordMysqlOperationsDataPoint(ts pcommon.Timestamp, val string, operationsAttributeValue string) error { + if i, err := strconv.ParseInt(val, 10, 64); err != nil { + return fmt.Errorf("failed to parse int for MysqlOperations, value was %s: %w", val, err) + } else { + mb.metricMysqlOperations.recordDataPoint(mb.startTime, ts, i, operationsAttributeValue) + } + return nil } // RecordMysqlPageOperationsDataPoint adds a data point to mysql.page_operations metric. 
-func (mb *MetricsBuilder) RecordMysqlPageOperationsDataPoint(ts pcommon.Timestamp, val int64, pageOperationsAttributeValue string) { - mb.metricMysqlPageOperations.recordDataPoint(mb.startTime, ts, val, pageOperationsAttributeValue) +func (mb *MetricsBuilder) RecordMysqlPageOperationsDataPoint(ts pcommon.Timestamp, val string, pageOperationsAttributeValue string) error { + if i, err := strconv.ParseInt(val, 10, 64); err != nil { + return fmt.Errorf("failed to parse int for MysqlPageOperations, value was %s: %w", val, err) + } else { + mb.metricMysqlPageOperations.recordDataPoint(mb.startTime, ts, i, pageOperationsAttributeValue) + } + return nil } // RecordMysqlRowLocksDataPoint adds a data point to mysql.row_locks metric. -func (mb *MetricsBuilder) RecordMysqlRowLocksDataPoint(ts pcommon.Timestamp, val int64, rowLocksAttributeValue string) { - mb.metricMysqlRowLocks.recordDataPoint(mb.startTime, ts, val, rowLocksAttributeValue) +func (mb *MetricsBuilder) RecordMysqlRowLocksDataPoint(ts pcommon.Timestamp, val string, rowLocksAttributeValue string) error { + if i, err := strconv.ParseInt(val, 10, 64); err != nil { + return fmt.Errorf("failed to parse int for MysqlRowLocks, value was %s: %w", val, err) + } else { + mb.metricMysqlRowLocks.recordDataPoint(mb.startTime, ts, i, rowLocksAttributeValue) + } + return nil } // RecordMysqlRowOperationsDataPoint adds a data point to mysql.row_operations metric. -func (mb *MetricsBuilder) RecordMysqlRowOperationsDataPoint(ts pcommon.Timestamp, val int64, rowOperationsAttributeValue string) { - mb.metricMysqlRowOperations.recordDataPoint(mb.startTime, ts, val, rowOperationsAttributeValue) +func (mb *MetricsBuilder) RecordMysqlRowOperationsDataPoint(ts pcommon.Timestamp, val string, rowOperationsAttributeValue string) error { + if i, err := strconv.ParseInt(val, 10, 64); err != nil { + return fmt.Errorf("failed to parse int for MysqlRowOperations, value was %s: %w", val, err) + } else { + mb.metricMysqlRowOperations.recordDataPoint(mb.startTime, ts, i, rowOperationsAttributeValue) + } + return nil } // RecordMysqlSortsDataPoint adds a data point to mysql.sorts metric. -func (mb *MetricsBuilder) RecordMysqlSortsDataPoint(ts pcommon.Timestamp, val int64, sortsAttributeValue string) { - mb.metricMysqlSorts.recordDataPoint(mb.startTime, ts, val, sortsAttributeValue) +func (mb *MetricsBuilder) RecordMysqlSortsDataPoint(ts pcommon.Timestamp, val string, sortsAttributeValue string) error { + if i, err := strconv.ParseInt(val, 10, 64); err != nil { + return fmt.Errorf("failed to parse int for MysqlSorts, value was %s: %w", val, err) + } else { + mb.metricMysqlSorts.recordDataPoint(mb.startTime, ts, i, sortsAttributeValue) + } + return nil } // RecordMysqlThreadsDataPoint adds a data point to mysql.threads metric. -func (mb *MetricsBuilder) RecordMysqlThreadsDataPoint(ts pcommon.Timestamp, val int64, threadsAttributeValue string) { - mb.metricMysqlThreads.recordDataPoint(mb.startTime, ts, val, threadsAttributeValue) +func (mb *MetricsBuilder) RecordMysqlThreadsDataPoint(ts pcommon.Timestamp, val string, threadsAttributeValue string) error { + if i, err := strconv.ParseInt(val, 10, 64); err != nil { + return fmt.Errorf("failed to parse int for MysqlThreads, value was %s: %w", val, err) + } else { + mb.metricMysqlThreads.recordDataPoint(mb.startTime, ts, i, threadsAttributeValue) + } + return nil } // Reset resets metrics builder to its initial state. 
It should be used when external metrics source is restarted, diff --git a/receiver/mysqlreceiver/metadata.yaml b/receiver/mysqlreceiver/metadata.yaml index eb863bb8b04de..4e0103e8af619 100644 --- a/receiver/mysqlreceiver/metadata.yaml +++ b/receiver/mysqlreceiver/metadata.yaml @@ -65,6 +65,7 @@ metrics: unit: 1 sum: value_type: int + input_type: string monotonic: false aggregation: cumulative attributes: [buffer_pool_pages] @@ -83,6 +84,7 @@ metrics: unit: 1 sum: value_type: int + input_type: string monotonic: true aggregation: cumulative mysql.buffer_pool.operations: @@ -91,6 +93,7 @@ metrics: unit: 1 sum: value_type: int + input_type: string monotonic: true aggregation: cumulative attributes: [buffer_pool_operations] @@ -100,6 +103,7 @@ metrics: unit: By sum: value_type: int + input_type: string monotonic: false aggregation: cumulative mysql.buffer_pool.usage: @@ -117,6 +121,7 @@ metrics: unit: 1 sum: value_type: int + input_type: string monotonic: true aggregation: cumulative attributes: [command] @@ -126,6 +131,7 @@ metrics: unit: 1 sum: value_type: int + input_type: string monotonic: true aggregation: cumulative attributes: [handler] @@ -135,6 +141,7 @@ metrics: unit: 1 sum: value_type: int + input_type: string monotonic: true aggregation: cumulative attributes: [double_writes] @@ -144,6 +151,7 @@ metrics: unit: 1 sum: value_type: int + input_type: string monotonic: true aggregation: cumulative attributes: [log_operations] @@ -153,6 +161,7 @@ metrics: unit: 1 sum: value_type: int + input_type: string monotonic: true aggregation: cumulative attributes: [operations] @@ -162,6 +171,7 @@ metrics: unit: 1 sum: value_type: int + input_type: string monotonic: true aggregation: cumulative attributes: [page_operations] @@ -171,6 +181,7 @@ metrics: unit: 1 sum: value_type: int + input_type: string monotonic: true aggregation: cumulative attributes: [row_locks] @@ -180,6 +191,7 @@ metrics: unit: 1 sum: value_type: int + input_type: string monotonic: true aggregation: cumulative attributes: [row_operations] @@ -189,6 +201,7 @@ metrics: unit: 1 sum: value_type: int + input_type: string monotonic: true aggregation: cumulative attributes: [locks] @@ -198,6 +211,7 @@ metrics: unit: 1 sum: value_type: int + input_type: string monotonic: true aggregation: cumulative attributes: [sorts] @@ -207,6 +221,7 @@ metrics: unit: 1 sum: value_type: int + input_type: string monotonic: false aggregation: cumulative attributes: [threads] diff --git a/receiver/mysqlreceiver/scraper.go b/receiver/mysqlreceiver/scraper.go index 85d6fea192247..8582865167a00 100644 --- a/receiver/mysqlreceiver/scraper.go +++ b/receiver/mysqlreceiver/scraper.go @@ -87,11 +87,7 @@ func (m *mySQLScraper) scrape(context.Context) (pmetric.Metrics, error) { if k != "buffer_pool_size" { continue } - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlBufferPoolLimitDataPoint(now, i) - } + addPartialIfError(errors, m.mb.RecordMysqlBufferPoolLimitDataPoint(now, v)) } // collect global status metrics. 
@@ -109,409 +105,167 @@ func (m *mySQLScraper) scrape(context.Context) (pmetric.Metrics, error) { // buffer_pool.pages case "Innodb_buffer_pool_pages_data": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlBufferPoolPagesDataPoint(now, i, "data") - } + addPartialIfError(errors, m.mb.RecordMysqlBufferPoolPagesDataPoint(now, v, "data")) case "Innodb_buffer_pool_pages_free": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlBufferPoolPagesDataPoint(now, i, "free") - } + addPartialIfError(errors, m.mb.RecordMysqlBufferPoolPagesDataPoint(now, v, "free")) case "Innodb_buffer_pool_pages_misc": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlBufferPoolPagesDataPoint(now, i, "misc") - } + addPartialIfError(errors, m.mb.RecordMysqlBufferPoolPagesDataPoint(now, v, "misc")) // buffer_pool.page_flushes case "Innodb_buffer_pool_pages_flushed": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlBufferPoolPageFlushesDataPoint(now, i) - } + addPartialIfError(errors, m.mb.RecordMysqlBufferPoolPageFlushesDataPoint(now, v)) // buffer_pool.operations case "Innodb_buffer_pool_read_ahead_rnd": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, i, "read_ahead_rnd") - } + addPartialIfError(errors, m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, v, "read_ahead_rnd")) case "Innodb_buffer_pool_read_ahead": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, i, "read_ahead") - } + addPartialIfError(errors, m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, v, "read_ahead")) case "Innodb_buffer_pool_read_ahead_evicted": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, i, "read_ahead_evicted") - } + addPartialIfError(errors, m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, v, "read_ahead_evicted")) case "Innodb_buffer_pool_read_requests": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, i, "read_requests") - } + addPartialIfError(errors, m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, v, "read_requests")) case "Innodb_buffer_pool_reads": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, i, "reads") - } + addPartialIfError(errors, m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, v, "reads")) case "Innodb_buffer_pool_wait_free": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, i, "wait_free") - } + addPartialIfError(errors, m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, v, "wait_free")) case "Innodb_buffer_pool_write_requests": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, i, "write_requests") - } + addPartialIfError(errors, m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, v, "write_requests")) // commands case "Com_stmt_execute": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlCommandsDataPoint(now, i, "execute") - } + addPartialIfError(errors, m.mb.RecordMysqlCommandsDataPoint(now, v, 
"execute")) case "Com_stmt_close": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlCommandsDataPoint(now, i, "close") - } + addPartialIfError(errors, m.mb.RecordMysqlCommandsDataPoint(now, v, "close")) case "Com_stmt_fetch": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlCommandsDataPoint(now, i, "fetch") - } + addPartialIfError(errors, m.mb.RecordMysqlCommandsDataPoint(now, v, "fetch")) case "Com_stmt_prepare": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlCommandsDataPoint(now, i, "prepare") - } + addPartialIfError(errors, m.mb.RecordMysqlCommandsDataPoint(now, v, "prepare")) case "Com_stmt_reset": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlCommandsDataPoint(now, i, "reset") - } + addPartialIfError(errors, m.mb.RecordMysqlCommandsDataPoint(now, v, "reset")) case "Com_stmt_send_long_data": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlCommandsDataPoint(now, i, "send_long_data") - } + addPartialIfError(errors, m.mb.RecordMysqlCommandsDataPoint(now, v, "send_long_data")) // handlers case "Handler_commit": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlHandlersDataPoint(now, i, "commit") - } + addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "commit")) case "Handler_delete": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlHandlersDataPoint(now, i, "delete") - } + addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "delete")) case "Handler_discover": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlHandlersDataPoint(now, i, "discover") - } + addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "discover")) case "Handler_external_lock": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlHandlersDataPoint(now, i, "lock") - } + addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "lock")) case "Handler_mrr_init": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlHandlersDataPoint(now, i, "mrr_init") - } + addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "mrr_init")) case "Handler_prepare": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlHandlersDataPoint(now, i, "prepare") - } + addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "prepare")) case "Handler_read_first": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlHandlersDataPoint(now, i, "read_first") - } + addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "read_first")) case "Handler_read_key": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlHandlersDataPoint(now, i, "read_key") - } + addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "read_key")) case "Handler_read_last": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlHandlersDataPoint(now, i, "read_last") - } + addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "read_last")) case "Handler_read_next": - if i, err := parseInt(v); err != nil { - 
errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlHandlersDataPoint(now, i, "read_next") - } + addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "read_next")) case "Handler_read_prev": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlHandlersDataPoint(now, i, "read_prev") - } + addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "read_prev")) case "Handler_read_rnd": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlHandlersDataPoint(now, i, "read_rnd") - } + addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "read_rnd")) case "Handler_read_rnd_next": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlHandlersDataPoint(now, i, "read_rnd_next") - } + addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "read_rnd_next")) case "Handler_rollback": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlHandlersDataPoint(now, i, "rollback") - } + addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "rollback")) case "Handler_savepoint": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlHandlersDataPoint(now, i, "savepoint") - } + addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "savepoint")) case "Handler_savepoint_rollback": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlHandlersDataPoint(now, i, "savepoint_rollback") - } + addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "savepoint_rollback")) case "Handler_update": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlHandlersDataPoint(now, i, "update") - } + addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "update")) case "Handler_write": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlHandlersDataPoint(now, i, "write") - } + addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "write")) // double_writes case "Innodb_dblwr_pages_written": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlDoubleWritesDataPoint(now, i, "written") - } + addPartialIfError(errors, m.mb.RecordMysqlDoubleWritesDataPoint(now, v, "written")) case "Innodb_dblwr_writes": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlDoubleWritesDataPoint(now, i, "writes") - } + addPartialIfError(errors, m.mb.RecordMysqlDoubleWritesDataPoint(now, v, "writes")) // log_operations case "Innodb_log_waits": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlLogOperationsDataPoint(now, i, "waits") - } + addPartialIfError(errors, m.mb.RecordMysqlLogOperationsDataPoint(now, v, "waits")) case "Innodb_log_write_requests": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlLogOperationsDataPoint(now, i, "requests") - } + addPartialIfError(errors, m.mb.RecordMysqlLogOperationsDataPoint(now, v, "requests")) case "Innodb_log_writes": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlLogOperationsDataPoint(now, i, "writes") - } + addPartialIfError(errors, m.mb.RecordMysqlLogOperationsDataPoint(now, v, "writes")) // operations case 
"Innodb_data_fsyncs": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlOperationsDataPoint(now, i, "fsyncs") - } + addPartialIfError(errors, m.mb.RecordMysqlOperationsDataPoint(now, v, "fsyncs")) case "Innodb_data_reads": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlOperationsDataPoint(now, i, "reads") - } + addPartialIfError(errors, m.mb.RecordMysqlOperationsDataPoint(now, v, "reads")) case "Innodb_data_writes": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlOperationsDataPoint(now, i, "writes") - } + addPartialIfError(errors, m.mb.RecordMysqlOperationsDataPoint(now, v, "writes")) // page_operations case "Innodb_pages_created": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlPageOperationsDataPoint(now, i, "created") - } + addPartialIfError(errors, m.mb.RecordMysqlPageOperationsDataPoint(now, v, "created")) case "Innodb_pages_read": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlPageOperationsDataPoint(now, i, "read") - } + addPartialIfError(errors, m.mb.RecordMysqlPageOperationsDataPoint(now, v, "read")) case "Innodb_pages_written": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlPageOperationsDataPoint(now, i, "written") - } + addPartialIfError(errors, m.mb.RecordMysqlPageOperationsDataPoint(now, v, "written")) // row_locks case "Innodb_row_lock_waits": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlRowLocksDataPoint(now, i, "waits") - } + addPartialIfError(errors, m.mb.RecordMysqlRowLocksDataPoint(now, v, "waits")) case "Innodb_row_lock_time": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlRowLocksDataPoint(now, i, "time") - } + addPartialIfError(errors, m.mb.RecordMysqlRowLocksDataPoint(now, v, "time")) // row_operations case "Innodb_rows_deleted": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlRowOperationsDataPoint(now, i, "deleted") - } + addPartialIfError(errors, m.mb.RecordMysqlRowOperationsDataPoint(now, v, "deleted")) case "Innodb_rows_inserted": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlRowOperationsDataPoint(now, i, "inserted") - } + addPartialIfError(errors, m.mb.RecordMysqlRowOperationsDataPoint(now, v, "inserted")) case "Innodb_rows_read": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlRowOperationsDataPoint(now, i, "read") - } + addPartialIfError(errors, m.mb.RecordMysqlRowOperationsDataPoint(now, v, "read")) case "Innodb_rows_updated": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlRowOperationsDataPoint(now, i, "updated") - } + addPartialIfError(errors, m.mb.RecordMysqlRowOperationsDataPoint(now, v, "updated")) // locks case "Table_locks_immediate": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlLocksDataPoint(now, i, "immediate") - } + addPartialIfError(errors, m.mb.RecordMysqlLocksDataPoint(now, v, "immediate")) case "Table_locks_waited": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlLocksDataPoint(now, i, "waited") - } + addPartialIfError(errors, 
m.mb.RecordMysqlLocksDataPoint(now, v, "waited")) // sorts case "Sort_merge_passes": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlSortsDataPoint(now, i, "merge_passes") - } + addPartialIfError(errors, m.mb.RecordMysqlSortsDataPoint(now, v, "merge_passes")) case "Sort_range": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlSortsDataPoint(now, i, "range") - } + addPartialIfError(errors, m.mb.RecordMysqlSortsDataPoint(now, v, "range")) case "Sort_rows": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlSortsDataPoint(now, i, "rows") - } + addPartialIfError(errors, m.mb.RecordMysqlSortsDataPoint(now, v, "rows")) case "Sort_scan": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlSortsDataPoint(now, i, "scan") - } + addPartialIfError(errors, m.mb.RecordMysqlSortsDataPoint(now, v, "scan")) // threads case "Threads_cached": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlThreadsDataPoint(now, i, "cached") - } + addPartialIfError(errors, m.mb.RecordMysqlThreadsDataPoint(now, v, "cached")) case "Threads_connected": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlThreadsDataPoint(now, i, "connected") - } + addPartialIfError(errors, m.mb.RecordMysqlThreadsDataPoint(now, v, "connected")) case "Threads_created": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlThreadsDataPoint(now, i, "created") - } + addPartialIfError(errors, m.mb.RecordMysqlThreadsDataPoint(now, v, "created")) case "Threads_running": - if i, err := parseInt(v); err != nil { - errors.AddPartial(1, err) - } else { - m.mb.RecordMysqlThreadsDataPoint(now, i, "running") - } + addPartialIfError(errors, m.mb.RecordMysqlThreadsDataPoint(now, v, "running")) } } return m.mb.Emit(), errors.Combine() } +func addPartialIfError(errors scrapererror.ScrapeErrors, err error) { + if err != nil { + errors.AddPartial(1, err) + } +} + func (m *mySQLScraper) recordDataPages(now pcommon.Timestamp, globalStats map[string]string, errors scrapererror.ScrapeErrors) { dirty, err := parseInt(globalStats["Innodb_buffer_pool_pages_dirty"]) if err != nil {
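For reference, the end-to-end pattern this change introduces can be sketched outside the diff as follows. This is an illustrative sketch only, not code from this change: the package, metric, and function names (example, recordExampleRequestsDataPoint, scrapeExample, exampleStats) are made up, while the parse-and-return-error shape and the scrapererror usage mirror the generated files and scrapers shown above.

// Hypothetical sketch (not part of this diff): a metadata.yaml metric declared
// with value_type: int and input_type: string now yields a recorder that takes
// the raw string and returns a parse error, which the scraper accumulates.
package example

import (
	"fmt"
	"strconv"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/receiver/scrapererror"
)

// recordExampleRequestsDataPoint stands in for a generated Record*DataPoint:
// it parses the string and returns a wrapped error instead of logging.
func recordExampleRequestsDataPoint(ts pcommon.Timestamp, val string) error {
	i, err := strconv.ParseInt(val, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int for ExampleRequests, value was %s: %w", val, err)
	}
	_ = i // a real generated builder would call metricExampleRequests.recordDataPoint(startTime, ts, i)
	return nil
}

// scrapeExample shows the scraper-side pattern from the diff: each parse
// failure becomes a partial scrape error and is surfaced via Combine.
func scrapeExample(exampleStats map[string]string) error {
	errs := &scrapererror.ScrapeErrors{}
	now := pcommon.NewTimestampFromTime(time.Now())
	for key, value := range exampleStats {
		if key == "Total Accesses" {
			if err := recordExampleRequestsDataPoint(now, value); err != nil {
				errs.AddPartial(1, err)
			}
		}
	}
	return errs.Combine()
}

The design point is that invalid values are no longer logged and silently dropped inside each receiver; the generated recorder returns the parse failure and the scraper folds it into the scrape result as a partial error.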