diff --git a/CHANGELOG.md b/CHANGELOG.md index ca6bc120bd0b2..4c5e7f1c3fef7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,9 +13,9 @@ - `awscontainerinsightreceiver`: add full pod name when configured to AWS Container Insights Receiver (#7415) - `hostreceiver/loadscraper`: Migrate the scraper to the mdatagen metrics builder (#7288) - `awsecscontainermetricsreceiver`: Rename attributes to follow semantic conventions (#7425) -- `mysqlreceiver`: Add golden files for integration test - `datadogexporter`: Always map conventional attributes to tags (#7185) - +- `mysqlreceiver`: Add golden files for integration test (#7303) +- `mysqlreceiver`: Update to use mdatagen v2 (#7507) ## 🛑 Breaking changes 🛑 ## 🚀 New components 🚀 diff --git a/receiver/mysqlreceiver/codegen.go b/receiver/mysqlreceiver/codegen.go new file mode 100644 index 0000000000000..9eb6ea400ecaf --- /dev/null +++ b/receiver/mysqlreceiver/codegen.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:generate mdatagen --experimental-gen metadata.yaml + +package mysqlreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mysqlreceiver" diff --git a/receiver/mysqlreceiver/config.go b/receiver/mysqlreceiver/config.go index a8448c545e4b0..650f10f802dc3 100644 --- a/receiver/mysqlreceiver/config.go +++ b/receiver/mysqlreceiver/config.go @@ -17,6 +17,8 @@ package mysqlreceiver // import "github.com/open-telemetry/opentelemetry-collect import ( "go.opentelemetry.io/collector/config/confignet" "go.opentelemetry.io/collector/receiver/scraperhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mysqlreceiver/internal/metadata" ) type Config struct { @@ -26,4 +28,5 @@ type Config struct { Database string `mapstructure:"database,omitempty"` AllowNativePasswords bool `mapstructure:"allow_native_passwords,omitempty"` confignet.NetAddr `mapstructure:",squash"` + Metrics metadata.MetricsSettings `mapstructure:"metrics"` } diff --git a/receiver/mysqlreceiver/debug.test b/receiver/mysqlreceiver/debug.test deleted file mode 100755 index cfae20040904b..0000000000000 Binary files a/receiver/mysqlreceiver/debug.test and /dev/null differ diff --git a/receiver/mysqlreceiver/factory.go b/receiver/mysqlreceiver/factory.go index 7ad80b025e770..b6a30f41da717 100644 --- a/receiver/mysqlreceiver/factory.go +++ b/receiver/mysqlreceiver/factory.go @@ -14,8 +14,6 @@ package mysqlreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mysqlreceiver" -//go:generate mdatagen metadata.yaml - import ( "context" "time" @@ -26,6 +24,8 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver/receiverhelper" "go.opentelemetry.io/collector/receiver/scraperhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mysqlreceiver/internal/metadata" ) const ( @@ -51,6 +51,7 @@ func createDefaultConfig() config.Receiver { Endpoint: "localhost:3306", 
Transport: "tcp", }, + Metrics: metadata.DefaultMetricsSettings(), } } diff --git a/receiver/mysqlreceiver/internal/metadata/generated_metrics.go b/receiver/mysqlreceiver/internal/metadata/generated_metrics.go deleted file mode 100644 index ef3079ace84ef..0000000000000 --- a/receiver/mysqlreceiver/internal/metadata/generated_metrics.go +++ /dev/null @@ -1,516 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. - -package metadata - -import ( - "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" -) - -// Type is the component type name. -const Type config.Type = "mysqlreceiver" - -// MetricIntf is an interface to generically interact with generated metric. -type MetricIntf interface { - Name() string - New() pdata.Metric - Init(metric pdata.Metric) -} - -// Intentionally not exposing this so that it is opaque and can change freely. -type metricImpl struct { - name string - initFunc func(pdata.Metric) -} - -// Name returns the metric name. -func (m *metricImpl) Name() string { - return m.name -} - -// New creates a metric object preinitialized. -func (m *metricImpl) New() pdata.Metric { - metric := pdata.NewMetric() - m.Init(metric) - return metric -} - -// Init initializes the provided metric object. -func (m *metricImpl) Init(metric pdata.Metric) { - m.initFunc(metric) -} - -type metricStruct struct { - MysqlBufferPoolOperations MetricIntf - MysqlBufferPoolPages MetricIntf - MysqlBufferPoolSize MetricIntf - MysqlCommands MetricIntf - MysqlDoubleWrites MetricIntf - MysqlHandlers MetricIntf - MysqlLocks MetricIntf - MysqlLogOperations MetricIntf - MysqlOperations MetricIntf - MysqlPageOperations MetricIntf - MysqlRowLocks MetricIntf - MysqlRowOperations MetricIntf - MysqlSorts MetricIntf - MysqlThreads MetricIntf -} - -// Names returns a list of all the metric name strings. 
-func (m *metricStruct) Names() []string { - return []string{ - "mysql.buffer_pool_operations", - "mysql.buffer_pool_pages", - "mysql.buffer_pool_size", - "mysql.commands", - "mysql.double_writes", - "mysql.handlers", - "mysql.locks", - "mysql.log_operations", - "mysql.operations", - "mysql.page_operations", - "mysql.row_locks", - "mysql.row_operations", - "mysql.sorts", - "mysql.threads", - } -} - -var metricsByName = map[string]MetricIntf{ - "mysql.buffer_pool_operations": Metrics.MysqlBufferPoolOperations, - "mysql.buffer_pool_pages": Metrics.MysqlBufferPoolPages, - "mysql.buffer_pool_size": Metrics.MysqlBufferPoolSize, - "mysql.commands": Metrics.MysqlCommands, - "mysql.double_writes": Metrics.MysqlDoubleWrites, - "mysql.handlers": Metrics.MysqlHandlers, - "mysql.locks": Metrics.MysqlLocks, - "mysql.log_operations": Metrics.MysqlLogOperations, - "mysql.operations": Metrics.MysqlOperations, - "mysql.page_operations": Metrics.MysqlPageOperations, - "mysql.row_locks": Metrics.MysqlRowLocks, - "mysql.row_operations": Metrics.MysqlRowOperations, - "mysql.sorts": Metrics.MysqlSorts, - "mysql.threads": Metrics.MysqlThreads, -} - -func (m *metricStruct) ByName(n string) MetricIntf { - return metricsByName[n] -} - -// Metrics contains a set of methods for each metric that help with -// manipulating those metrics. 
-var Metrics = &metricStruct{ - &metricImpl{ - "mysql.buffer_pool_operations", - func(metric pdata.Metric) { - metric.SetName("mysql.buffer_pool_operations") - metric.SetDescription("The number of operations on the InnoDB buffer pool.") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, - &metricImpl{ - "mysql.buffer_pool_pages", - func(metric pdata.Metric) { - metric.SetName("mysql.buffer_pool_pages") - metric.SetDescription("The number of pages in the InnoDB buffer pool.") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(false) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, - &metricImpl{ - "mysql.buffer_pool_size", - func(metric pdata.Metric) { - metric.SetName("mysql.buffer_pool_size") - metric.SetDescription("The number of bytes in the InnoDB buffer pool.") - metric.SetUnit("By") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(false) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, - &metricImpl{ - "mysql.commands", - func(metric pdata.Metric) { - metric.SetName("mysql.commands") - metric.SetDescription("The number of times each type of command has been executed.") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, - &metricImpl{ - "mysql.double_writes", - func(metric pdata.Metric) { - metric.SetName("mysql.double_writes") - metric.SetDescription("The number of writes to the InnoDB doublewrite buffer.") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - 
}, - }, - &metricImpl{ - "mysql.handlers", - func(metric pdata.Metric) { - metric.SetName("mysql.handlers") - metric.SetDescription("The number of requests to various MySQL handlers.") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, - &metricImpl{ - "mysql.locks", - func(metric pdata.Metric) { - metric.SetName("mysql.locks") - metric.SetDescription("The number of MySQL locks.") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, - &metricImpl{ - "mysql.log_operations", - func(metric pdata.Metric) { - metric.SetName("mysql.log_operations") - metric.SetDescription("The number of InndoDB log operations.") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, - &metricImpl{ - "mysql.operations", - func(metric pdata.Metric) { - metric.SetName("mysql.operations") - metric.SetDescription("The number of InndoDB operations.") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, - &metricImpl{ - "mysql.page_operations", - func(metric pdata.Metric) { - metric.SetName("mysql.page_operations") - metric.SetDescription("The number of InndoDB page operations.") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, - &metricImpl{ - "mysql.row_locks", - func(metric pdata.Metric) { - metric.SetName("mysql.row_locks") - metric.SetDescription("The number 
of InndoDB row locks.") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, - &metricImpl{ - "mysql.row_operations", - func(metric pdata.Metric) { - metric.SetName("mysql.row_operations") - metric.SetDescription("The number of InndoDB row operations.") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, - &metricImpl{ - "mysql.sorts", - func(metric pdata.Metric) { - metric.SetName("mysql.sorts") - metric.SetDescription("The number of MySQL sorts.") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, - &metricImpl{ - "mysql.threads", - func(metric pdata.Metric) { - metric.SetName("mysql.threads") - metric.SetDescription("The state of MySQL threads.") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(false) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, -} - -// M contains a set of methods for each metric that help with -// manipulating those metrics. M is an alias for Metrics -var M = Metrics - -// Attributes contains the possible metric attributes that can be used. -var Attributes = struct { - // BufferPoolOperations (The buffer pool operations types.) - BufferPoolOperations string - // BufferPoolPages (The buffer pool pages types.) - BufferPoolPages string - // BufferPoolSize (The buffer pool size types.) - BufferPoolSize string - // Command (The command types.) - Command string - // DoubleWrites (The doublewrite types.) - DoubleWrites string - // Handler (The handler types.) - Handler string - // Locks (The table locks type.) 
- Locks string - // LogOperations (The log operation types.) - LogOperations string - // Operations (The operation types.) - Operations string - // PageOperations (The page operation types.) - PageOperations string - // RowLocks (The row lock type.) - RowLocks string - // RowOperations (The row operation type.) - RowOperations string - // Sorts (The sort count type.) - Sorts string - // Threads (The thread count type.) - Threads string -}{ - "operation", - "kind", - "kind", - "command", - "kind", - "kind", - "kind", - "operation", - "operation", - "operation", - "kind", - "operation", - "kind", - "kind", -} - -// A is an alias for Attributes. -var A = Attributes - -// AttributeBufferPoolOperations are the possible values that the attribute "buffer_pool_operations" can have. -var AttributeBufferPoolOperations = struct { - ReadAheadRnd string - ReadAhead string - ReadAheadEvicted string - ReadRequests string - Reads string - WaitFree string - WriteRequests string -}{ - "read_ahead_rnd", - "read_ahead", - "read_ahead_evicted", - "read_requests", - "reads", - "wait_free", - "write_requests", -} - -// AttributeBufferPoolPages are the possible values that the attribute "buffer_pool_pages" can have. -var AttributeBufferPoolPages = struct { - Data string - Dirty string - Flushed string - Free string - Misc string - Total string -}{ - "data", - "dirty", - "flushed", - "free", - "misc", - "total", -} - -// AttributeBufferPoolSize are the possible values that the attribute "buffer_pool_size" can have. -var AttributeBufferPoolSize = struct { - Data string - Dirty string - Total string -}{ - "data", - "dirty", - "total", -} - -// AttributeCommand are the possible values that the attribute "command" can have. 
-var AttributeCommand = struct { - Execute string - Close string - Fetch string - Prepare string - Reset string - SendLongData string -}{ - "execute", - "close", - "fetch", - "prepare", - "reset", - "send_long_data", -} - -// AttributeDoubleWrites are the possible values that the attribute "double_writes" can have. -var AttributeDoubleWrites = struct { - PagesWritten string - Writes string -}{ - "pages_written", - "writes", -} - -// AttributeHandler are the possible values that the attribute "handler" can have. -var AttributeHandler = struct { - Commit string - Delete string - Discover string - ExternalLock string - MrrInit string - Prepare string - ReadFirst string - ReadKey string - ReadLast string - ReadNext string - ReadPrev string - ReadRnd string - ReadRndNext string - Rollback string - Savepoint string - SavepointRollback string - Update string - Write string -}{ - "commit", - "delete", - "discover", - "external_lock", - "mrr_init", - "prepare", - "read_first", - "read_key", - "read_last", - "read_next", - "read_prev", - "read_rnd", - "read_rnd_next", - "rollback", - "savepoint", - "savepoint_rollback", - "update", - "write", -} - -// AttributeLocks are the possible values that the attribute "locks" can have. -var AttributeLocks = struct { - Immediate string - Waited string -}{ - "immediate", - "waited", -} - -// AttributeLogOperations are the possible values that the attribute "log_operations" can have. -var AttributeLogOperations = struct { - Waits string - WriteRequests string - Writes string -}{ - "waits", - "write_requests", - "writes", -} - -// AttributeOperations are the possible values that the attribute "operations" can have. -var AttributeOperations = struct { - Fsyncs string - Reads string - Writes string -}{ - "fsyncs", - "reads", - "writes", -} - -// AttributePageOperations are the possible values that the attribute "page_operations" can have. 
-var AttributePageOperations = struct { - Created string - Read string - Written string -}{ - "created", - "read", - "written", -} - -// AttributeRowLocks are the possible values that the attribute "row_locks" can have. -var AttributeRowLocks = struct { - Waits string - Time string -}{ - "waits", - "time", -} - -// AttributeRowOperations are the possible values that the attribute "row_operations" can have. -var AttributeRowOperations = struct { - Deleted string - Inserted string - Read string - Updated string -}{ - "deleted", - "inserted", - "read", - "updated", -} - -// AttributeSorts are the possible values that the attribute "sorts" can have. -var AttributeSorts = struct { - MergePasses string - Range string - Rows string - Scan string -}{ - "merge_passes", - "range", - "rows", - "scan", -} - -// AttributeThreads are the possible values that the attribute "threads" can have. -var AttributeThreads = struct { - Cached string - Connected string - Created string - Running string -}{ - "cached", - "connected", - "created", - "running", -} diff --git a/receiver/mysqlreceiver/internal/metadata/generated_metrics_v2.go b/receiver/mysqlreceiver/internal/metadata/generated_metrics_v2.go new file mode 100644 index 0000000000000..01394747f8e3d --- /dev/null +++ b/receiver/mysqlreceiver/internal/metadata/generated_metrics_v2.go @@ -0,0 +1,1228 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "time" + + "go.opentelemetry.io/collector/model/pdata" +) + +// MetricSettings provides common settings for a particular metric. +type MetricSettings struct { + Enabled bool `mapstructure:"enabled"` +} + +// MetricsSettings provides settings for mysqlreceiver metrics. 
+type MetricsSettings struct { + MysqlBufferPoolOperations MetricSettings `mapstructure:"mysql.buffer_pool_operations"` + MysqlBufferPoolPages MetricSettings `mapstructure:"mysql.buffer_pool_pages"` + MysqlBufferPoolSize MetricSettings `mapstructure:"mysql.buffer_pool_size"` + MysqlCommands MetricSettings `mapstructure:"mysql.commands"` + MysqlDoubleWrites MetricSettings `mapstructure:"mysql.double_writes"` + MysqlHandlers MetricSettings `mapstructure:"mysql.handlers"` + MysqlLocks MetricSettings `mapstructure:"mysql.locks"` + MysqlLogOperations MetricSettings `mapstructure:"mysql.log_operations"` + MysqlOperations MetricSettings `mapstructure:"mysql.operations"` + MysqlPageOperations MetricSettings `mapstructure:"mysql.page_operations"` + MysqlRowLocks MetricSettings `mapstructure:"mysql.row_locks"` + MysqlRowOperations MetricSettings `mapstructure:"mysql.row_operations"` + MysqlSorts MetricSettings `mapstructure:"mysql.sorts"` + MysqlThreads MetricSettings `mapstructure:"mysql.threads"` +} + +func DefaultMetricsSettings() MetricsSettings { + return MetricsSettings{ + MysqlBufferPoolOperations: MetricSettings{ + Enabled: true, + }, + MysqlBufferPoolPages: MetricSettings{ + Enabled: true, + }, + MysqlBufferPoolSize: MetricSettings{ + Enabled: true, + }, + MysqlCommands: MetricSettings{ + Enabled: true, + }, + MysqlDoubleWrites: MetricSettings{ + Enabled: true, + }, + MysqlHandlers: MetricSettings{ + Enabled: true, + }, + MysqlLocks: MetricSettings{ + Enabled: true, + }, + MysqlLogOperations: MetricSettings{ + Enabled: true, + }, + MysqlOperations: MetricSettings{ + Enabled: true, + }, + MysqlPageOperations: MetricSettings{ + Enabled: true, + }, + MysqlRowLocks: MetricSettings{ + Enabled: true, + }, + MysqlRowOperations: MetricSettings{ + Enabled: true, + }, + MysqlSorts: MetricSettings{ + Enabled: true, + }, + MysqlThreads: MetricSettings{ + Enabled: true, + }, + } +} + +type metricMysqlBufferPoolOperations struct { + data pdata.Metric // data buffer for generated 
metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mysql.buffer_pool_operations metric with initial data. +func (m *metricMysqlBufferPoolOperations) init() { + m.data.SetName("mysql.buffer_pool_operations") + m.data.SetDescription("The number of operations on the InnoDB buffer pool.") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMysqlBufferPoolOperations) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, bufferPoolOperationsAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) + dp.Attributes().Insert(A.BufferPoolOperations, pdata.NewAttributeValueString(bufferPoolOperationsAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMysqlBufferPoolOperations) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricMysqlBufferPoolOperations) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMysqlBufferPoolOperations(settings MetricSettings) metricMysqlBufferPoolOperations { + m := metricMysqlBufferPoolOperations{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMysqlBufferPoolPages struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mysql.buffer_pool_pages metric with initial data. +func (m *metricMysqlBufferPoolPages) init() { + m.data.SetName("mysql.buffer_pool_pages") + m.data.SetDescription("The number of pages in the InnoDB buffer pool.") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMysqlBufferPoolPages) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, bufferPoolPagesAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.BufferPoolPages, pdata.NewAttributeValueString(bufferPoolPagesAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMysqlBufferPoolPages) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricMysqlBufferPoolPages) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMysqlBufferPoolPages(settings MetricSettings) metricMysqlBufferPoolPages { + m := metricMysqlBufferPoolPages{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMysqlBufferPoolSize struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mysql.buffer_pool_size metric with initial data. +func (m *metricMysqlBufferPoolSize) init() { + m.data.SetName("mysql.buffer_pool_size") + m.data.SetDescription("The number of bytes in the InnoDB buffer pool.") + m.data.SetUnit("By") + m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMysqlBufferPoolSize) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, bufferPoolSizeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.BufferPoolSize, pdata.NewAttributeValueString(bufferPoolSizeAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMysqlBufferPoolSize) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricMysqlBufferPoolSize) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMysqlBufferPoolSize(settings MetricSettings) metricMysqlBufferPoolSize { + m := metricMysqlBufferPoolSize{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMysqlCommands struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mysql.commands metric with initial data. +func (m *metricMysqlCommands) init() { + m.data.SetName("mysql.commands") + m.data.SetDescription("The number of times each type of command has been executed.") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMysqlCommands) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, commandAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) + dp.Attributes().Insert(A.Command, pdata.NewAttributeValueString(commandAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMysqlCommands) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricMysqlCommands) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMysqlCommands(settings MetricSettings) metricMysqlCommands { + m := metricMysqlCommands{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMysqlDoubleWrites struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mysql.double_writes metric with initial data. +func (m *metricMysqlDoubleWrites) init() { + m.data.SetName("mysql.double_writes") + m.data.SetDescription("The number of writes to the InnoDB doublewrite buffer.") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMysqlDoubleWrites) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, doubleWritesAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) + dp.Attributes().Insert(A.DoubleWrites, pdata.NewAttributeValueString(doubleWritesAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMysqlDoubleWrites) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricMysqlDoubleWrites) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMysqlDoubleWrites(settings MetricSettings) metricMysqlDoubleWrites { + m := metricMysqlDoubleWrites{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMysqlHandlers struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mysql.handlers metric with initial data. +func (m *metricMysqlHandlers) init() { + m.data.SetName("mysql.handlers") + m.data.SetDescription("The number of requests to various MySQL handlers.") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMysqlHandlers) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, handlerAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) + dp.Attributes().Insert(A.Handler, pdata.NewAttributeValueString(handlerAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMysqlHandlers) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricMysqlHandlers) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMysqlHandlers(settings MetricSettings) metricMysqlHandlers { + m := metricMysqlHandlers{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMysqlLocks struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mysql.locks metric with initial data. +func (m *metricMysqlLocks) init() { + m.data.SetName("mysql.locks") + m.data.SetDescription("The number of MySQL locks.") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMysqlLocks) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, locksAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) + dp.Attributes().Insert(A.Locks, pdata.NewAttributeValueString(locksAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMysqlLocks) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricMysqlLocks) emit(metrics pdata.MetricSlice) {
+	if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricMysqlLocks(settings MetricSettings) metricMysqlLocks {
+	m := metricMysqlLocks{settings: settings}
+	if settings.Enabled {
+		m.data = pdata.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricMysqlLogOperations struct {
+	data     pdata.Metric   // data buffer for generated metric.
+	settings MetricSettings // metric settings provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills mysql.log_operations metric with initial data.
+func (m *metricMysqlLogOperations) init() {
+	m.data.SetName("mysql.log_operations")
+	m.data.SetDescription("The number of InnoDB log operations.")
+	m.data.SetUnit("1")
+	m.data.SetDataType(pdata.MetricDataTypeSum)
+	m.data.Sum().SetIsMonotonic(true)
+	m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative)
+	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricMysqlLogOperations) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, logOperationsAttributeValue string) {
+	if !m.settings.Enabled {
+		return
+	}
+	dp := m.data.Sum().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetIntVal(val)
+	dp.Attributes().Insert(A.LogOperations, pdata.NewAttributeValueString(logOperationsAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMysqlLogOperations) updateCapacity() {
+	if m.data.Sum().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Sum().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMysqlLogOperations) emit(metrics pdata.MetricSlice) {
+	if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricMysqlLogOperations(settings MetricSettings) metricMysqlLogOperations {
+	m := metricMysqlLogOperations{settings: settings}
+	if settings.Enabled {
+		m.data = pdata.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricMysqlOperations struct {
+	data     pdata.Metric   // data buffer for generated metric.
+	settings MetricSettings // metric settings provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills mysql.operations metric with initial data.
+func (m *metricMysqlOperations) init() {
+	m.data.SetName("mysql.operations")
+	m.data.SetDescription("The number of InnoDB operations.")
+	m.data.SetUnit("1")
+	m.data.SetDataType(pdata.MetricDataTypeSum)
+	m.data.Sum().SetIsMonotonic(true)
+	m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative)
+	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricMysqlOperations) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, operationsAttributeValue string) {
+	if !m.settings.Enabled {
+		return
+	}
+	dp := m.data.Sum().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetIntVal(val)
+	dp.Attributes().Insert(A.Operations, pdata.NewAttributeValueString(operationsAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMysqlOperations) updateCapacity() {
+	if m.data.Sum().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Sum().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMysqlOperations) emit(metrics pdata.MetricSlice) {
+	if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricMysqlOperations(settings MetricSettings) metricMysqlOperations {
+	m := metricMysqlOperations{settings: settings}
+	if settings.Enabled {
+		m.data = pdata.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricMysqlPageOperations struct {
+	data     pdata.Metric   // data buffer for generated metric.
+	settings MetricSettings // metric settings provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills mysql.page_operations metric with initial data.
+func (m *metricMysqlPageOperations) init() {
+	m.data.SetName("mysql.page_operations")
+	m.data.SetDescription("The number of InnoDB page operations.")
+	m.data.SetUnit("1")
+	m.data.SetDataType(pdata.MetricDataTypeSum)
+	m.data.Sum().SetIsMonotonic(true)
+	m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative)
+	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricMysqlPageOperations) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, pageOperationsAttributeValue string) {
+	if !m.settings.Enabled {
+		return
+	}
+	dp := m.data.Sum().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetIntVal(val)
+	dp.Attributes().Insert(A.PageOperations, pdata.NewAttributeValueString(pageOperationsAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMysqlPageOperations) updateCapacity() {
+	if m.data.Sum().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Sum().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMysqlPageOperations) emit(metrics pdata.MetricSlice) {
+	if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricMysqlPageOperations(settings MetricSettings) metricMysqlPageOperations {
+	m := metricMysqlPageOperations{settings: settings}
+	if settings.Enabled {
+		m.data = pdata.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricMysqlRowLocks struct {
+	data     pdata.Metric   // data buffer for generated metric.
+	settings MetricSettings // metric settings provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills mysql.row_locks metric with initial data.
+func (m *metricMysqlRowLocks) init() {
+	m.data.SetName("mysql.row_locks")
+	m.data.SetDescription("The number of InnoDB row locks.")
+	m.data.SetUnit("1")
+	m.data.SetDataType(pdata.MetricDataTypeSum)
+	m.data.Sum().SetIsMonotonic(true)
+	m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative)
+	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricMysqlRowLocks) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, rowLocksAttributeValue string) {
+	if !m.settings.Enabled {
+		return
+	}
+	dp := m.data.Sum().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetIntVal(val)
+	dp.Attributes().Insert(A.RowLocks, pdata.NewAttributeValueString(rowLocksAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMysqlRowLocks) updateCapacity() {
+	if m.data.Sum().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Sum().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMysqlRowLocks) emit(metrics pdata.MetricSlice) {
+	if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricMysqlRowLocks(settings MetricSettings) metricMysqlRowLocks {
+	m := metricMysqlRowLocks{settings: settings}
+	if settings.Enabled {
+		m.data = pdata.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricMysqlRowOperations struct {
+	data     pdata.Metric   // data buffer for generated metric.
+	settings MetricSettings // metric settings provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills mysql.row_operations metric with initial data.
+func (m *metricMysqlRowOperations) init() {
+	m.data.SetName("mysql.row_operations")
+	m.data.SetDescription("The number of InnoDB row operations.")
+	m.data.SetUnit("1")
+	m.data.SetDataType(pdata.MetricDataTypeSum)
+	m.data.Sum().SetIsMonotonic(true)
+	m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative)
+	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricMysqlRowOperations) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, rowOperationsAttributeValue string) {
+	if !m.settings.Enabled {
+		return
+	}
+	dp := m.data.Sum().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetIntVal(val)
+	dp.Attributes().Insert(A.RowOperations, pdata.NewAttributeValueString(rowOperationsAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMysqlRowOperations) updateCapacity() {
+	if m.data.Sum().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Sum().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMysqlRowOperations) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMysqlRowOperations(settings MetricSettings) metricMysqlRowOperations { + m := metricMysqlRowOperations{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMysqlSorts struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mysql.sorts metric with initial data. +func (m *metricMysqlSorts) init() { + m.data.SetName("mysql.sorts") + m.data.SetDescription("The number of MySQL sorts.") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMysqlSorts) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, sortsAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) + dp.Attributes().Insert(A.Sorts, pdata.NewAttributeValueString(sortsAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMysqlSorts) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricMysqlSorts) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMysqlSorts(settings MetricSettings) metricMysqlSorts { + m := metricMysqlSorts{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMysqlThreads struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mysql.threads metric with initial data. +func (m *metricMysqlThreads) init() { + m.data.SetName("mysql.threads") + m.data.SetDescription("The state of MySQL threads.") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMysqlThreads) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, threadsAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.Threads, pdata.NewAttributeValueString(threadsAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMysqlThreads) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricMysqlThreads) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMysqlThreads(settings MetricSettings) metricMysqlThreads { + m := metricMysqlThreads{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations +// required to produce metric representation defined in metadata and user settings. +type MetricsBuilder struct { + startTime pdata.Timestamp + metricMysqlBufferPoolOperations metricMysqlBufferPoolOperations + metricMysqlBufferPoolPages metricMysqlBufferPoolPages + metricMysqlBufferPoolSize metricMysqlBufferPoolSize + metricMysqlCommands metricMysqlCommands + metricMysqlDoubleWrites metricMysqlDoubleWrites + metricMysqlHandlers metricMysqlHandlers + metricMysqlLocks metricMysqlLocks + metricMysqlLogOperations metricMysqlLogOperations + metricMysqlOperations metricMysqlOperations + metricMysqlPageOperations metricMysqlPageOperations + metricMysqlRowLocks metricMysqlRowLocks + metricMysqlRowOperations metricMysqlRowOperations + metricMysqlSorts metricMysqlSorts + metricMysqlThreads metricMysqlThreads +} + +// metricBuilderOption applies changes to default metrics builder. +type metricBuilderOption func(*MetricsBuilder) + +// WithStartTime sets startTime on the metrics builder. 
+func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { + return func(mb *MetricsBuilder) { + mb.startTime = startTime + } +} + +func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { + mb := &MetricsBuilder{ + startTime: pdata.NewTimestampFromTime(time.Now()), + metricMysqlBufferPoolOperations: newMetricMysqlBufferPoolOperations(settings.MysqlBufferPoolOperations), + metricMysqlBufferPoolPages: newMetricMysqlBufferPoolPages(settings.MysqlBufferPoolPages), + metricMysqlBufferPoolSize: newMetricMysqlBufferPoolSize(settings.MysqlBufferPoolSize), + metricMysqlCommands: newMetricMysqlCommands(settings.MysqlCommands), + metricMysqlDoubleWrites: newMetricMysqlDoubleWrites(settings.MysqlDoubleWrites), + metricMysqlHandlers: newMetricMysqlHandlers(settings.MysqlHandlers), + metricMysqlLocks: newMetricMysqlLocks(settings.MysqlLocks), + metricMysqlLogOperations: newMetricMysqlLogOperations(settings.MysqlLogOperations), + metricMysqlOperations: newMetricMysqlOperations(settings.MysqlOperations), + metricMysqlPageOperations: newMetricMysqlPageOperations(settings.MysqlPageOperations), + metricMysqlRowLocks: newMetricMysqlRowLocks(settings.MysqlRowLocks), + metricMysqlRowOperations: newMetricMysqlRowOperations(settings.MysqlRowOperations), + metricMysqlSorts: newMetricMysqlSorts(settings.MysqlSorts), + metricMysqlThreads: newMetricMysqlThreads(settings.MysqlThreads), + } + for _, op := range options { + op(mb) + } + return mb +} + +// Emit appends generated metrics to a pdata.MetricsSlice and updates the internal state to be ready for recording +// another set of data points. This function will be doing all transformations required to produce metric representation +// defined in metadata and user settings, e.g. delta/cumulative translation. 
+func (mb *MetricsBuilder) Emit(metrics pdata.MetricSlice) { + mb.metricMysqlBufferPoolOperations.emit(metrics) + mb.metricMysqlBufferPoolPages.emit(metrics) + mb.metricMysqlBufferPoolSize.emit(metrics) + mb.metricMysqlCommands.emit(metrics) + mb.metricMysqlDoubleWrites.emit(metrics) + mb.metricMysqlHandlers.emit(metrics) + mb.metricMysqlLocks.emit(metrics) + mb.metricMysqlLogOperations.emit(metrics) + mb.metricMysqlOperations.emit(metrics) + mb.metricMysqlPageOperations.emit(metrics) + mb.metricMysqlRowLocks.emit(metrics) + mb.metricMysqlRowOperations.emit(metrics) + mb.metricMysqlSorts.emit(metrics) + mb.metricMysqlThreads.emit(metrics) +} + +// RecordMysqlBufferPoolOperationsDataPoint adds a data point to mysql.buffer_pool_operations metric. +func (mb *MetricsBuilder) RecordMysqlBufferPoolOperationsDataPoint(ts pdata.Timestamp, val int64, bufferPoolOperationsAttributeValue string) { + mb.metricMysqlBufferPoolOperations.recordDataPoint(mb.startTime, ts, val, bufferPoolOperationsAttributeValue) +} + +// RecordMysqlBufferPoolPagesDataPoint adds a data point to mysql.buffer_pool_pages metric. +func (mb *MetricsBuilder) RecordMysqlBufferPoolPagesDataPoint(ts pdata.Timestamp, val float64, bufferPoolPagesAttributeValue string) { + mb.metricMysqlBufferPoolPages.recordDataPoint(mb.startTime, ts, val, bufferPoolPagesAttributeValue) +} + +// RecordMysqlBufferPoolSizeDataPoint adds a data point to mysql.buffer_pool_size metric. +func (mb *MetricsBuilder) RecordMysqlBufferPoolSizeDataPoint(ts pdata.Timestamp, val float64, bufferPoolSizeAttributeValue string) { + mb.metricMysqlBufferPoolSize.recordDataPoint(mb.startTime, ts, val, bufferPoolSizeAttributeValue) +} + +// RecordMysqlCommandsDataPoint adds a data point to mysql.commands metric. 
+func (mb *MetricsBuilder) RecordMysqlCommandsDataPoint(ts pdata.Timestamp, val int64, commandAttributeValue string) { + mb.metricMysqlCommands.recordDataPoint(mb.startTime, ts, val, commandAttributeValue) +} + +// RecordMysqlDoubleWritesDataPoint adds a data point to mysql.double_writes metric. +func (mb *MetricsBuilder) RecordMysqlDoubleWritesDataPoint(ts pdata.Timestamp, val int64, doubleWritesAttributeValue string) { + mb.metricMysqlDoubleWrites.recordDataPoint(mb.startTime, ts, val, doubleWritesAttributeValue) +} + +// RecordMysqlHandlersDataPoint adds a data point to mysql.handlers metric. +func (mb *MetricsBuilder) RecordMysqlHandlersDataPoint(ts pdata.Timestamp, val int64, handlerAttributeValue string) { + mb.metricMysqlHandlers.recordDataPoint(mb.startTime, ts, val, handlerAttributeValue) +} + +// RecordMysqlLocksDataPoint adds a data point to mysql.locks metric. +func (mb *MetricsBuilder) RecordMysqlLocksDataPoint(ts pdata.Timestamp, val int64, locksAttributeValue string) { + mb.metricMysqlLocks.recordDataPoint(mb.startTime, ts, val, locksAttributeValue) +} + +// RecordMysqlLogOperationsDataPoint adds a data point to mysql.log_operations metric. +func (mb *MetricsBuilder) RecordMysqlLogOperationsDataPoint(ts pdata.Timestamp, val int64, logOperationsAttributeValue string) { + mb.metricMysqlLogOperations.recordDataPoint(mb.startTime, ts, val, logOperationsAttributeValue) +} + +// RecordMysqlOperationsDataPoint adds a data point to mysql.operations metric. +func (mb *MetricsBuilder) RecordMysqlOperationsDataPoint(ts pdata.Timestamp, val int64, operationsAttributeValue string) { + mb.metricMysqlOperations.recordDataPoint(mb.startTime, ts, val, operationsAttributeValue) +} + +// RecordMysqlPageOperationsDataPoint adds a data point to mysql.page_operations metric. 
+func (mb *MetricsBuilder) RecordMysqlPageOperationsDataPoint(ts pdata.Timestamp, val int64, pageOperationsAttributeValue string) { + mb.metricMysqlPageOperations.recordDataPoint(mb.startTime, ts, val, pageOperationsAttributeValue) +} + +// RecordMysqlRowLocksDataPoint adds a data point to mysql.row_locks metric. +func (mb *MetricsBuilder) RecordMysqlRowLocksDataPoint(ts pdata.Timestamp, val int64, rowLocksAttributeValue string) { + mb.metricMysqlRowLocks.recordDataPoint(mb.startTime, ts, val, rowLocksAttributeValue) +} + +// RecordMysqlRowOperationsDataPoint adds a data point to mysql.row_operations metric. +func (mb *MetricsBuilder) RecordMysqlRowOperationsDataPoint(ts pdata.Timestamp, val int64, rowOperationsAttributeValue string) { + mb.metricMysqlRowOperations.recordDataPoint(mb.startTime, ts, val, rowOperationsAttributeValue) +} + +// RecordMysqlSortsDataPoint adds a data point to mysql.sorts metric. +func (mb *MetricsBuilder) RecordMysqlSortsDataPoint(ts pdata.Timestamp, val int64, sortsAttributeValue string) { + mb.metricMysqlSorts.recordDataPoint(mb.startTime, ts, val, sortsAttributeValue) +} + +// RecordMysqlThreadsDataPoint adds a data point to mysql.threads metric. +func (mb *MetricsBuilder) RecordMysqlThreadsDataPoint(ts pdata.Timestamp, val float64, threadsAttributeValue string) { + mb.metricMysqlThreads.recordDataPoint(mb.startTime, ts, val, threadsAttributeValue) +} + +// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, +// and metrics builder should update its startTime and reset it's internal state accordingly. +func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { + mb.startTime = pdata.NewTimestampFromTime(time.Now()) + for _, op := range options { + op(mb) + } +} + +// Attributes contains the possible metric attributes that can be used. +var Attributes = struct { + // BufferPoolOperations (The buffer pool operations types.) 
+ BufferPoolOperations string + // BufferPoolPages (The buffer pool pages types.) + BufferPoolPages string + // BufferPoolSize (The buffer pool size types.) + BufferPoolSize string + // Command (The command types.) + Command string + // DoubleWrites (The doublewrite types.) + DoubleWrites string + // Handler (The handler types.) + Handler string + // Locks (The table locks type.) + Locks string + // LogOperations (The log operation types.) + LogOperations string + // Operations (The operation types.) + Operations string + // PageOperations (The page operation types.) + PageOperations string + // RowLocks (The row lock type.) + RowLocks string + // RowOperations (The row operation type.) + RowOperations string + // Sorts (The sort count type.) + Sorts string + // Threads (The thread count type.) + Threads string +}{ + "operation", + "kind", + "kind", + "command", + "kind", + "kind", + "kind", + "operation", + "operation", + "operation", + "kind", + "operation", + "kind", + "kind", +} + +// A is an alias for Attributes. +var A = Attributes + +// AttributeBufferPoolOperations are the possible values that the attribute "buffer_pool_operations" can have. +var AttributeBufferPoolOperations = struct { + ReadAheadRnd string + ReadAhead string + ReadAheadEvicted string + ReadRequests string + Reads string + WaitFree string + WriteRequests string +}{ + "read_ahead_rnd", + "read_ahead", + "read_ahead_evicted", + "read_requests", + "reads", + "wait_free", + "write_requests", +} + +// AttributeBufferPoolPages are the possible values that the attribute "buffer_pool_pages" can have. +var AttributeBufferPoolPages = struct { + Data string + Dirty string + Flushed string + Free string + Misc string + Total string +}{ + "data", + "dirty", + "flushed", + "free", + "misc", + "total", +} + +// AttributeBufferPoolSize are the possible values that the attribute "buffer_pool_size" can have. 
+var AttributeBufferPoolSize = struct { + Data string + Dirty string + Total string +}{ + "data", + "dirty", + "total", +} + +// AttributeCommand are the possible values that the attribute "command" can have. +var AttributeCommand = struct { + Execute string + Close string + Fetch string + Prepare string + Reset string + SendLongData string +}{ + "execute", + "close", + "fetch", + "prepare", + "reset", + "send_long_data", +} + +// AttributeDoubleWrites are the possible values that the attribute "double_writes" can have. +var AttributeDoubleWrites = struct { + PagesWritten string + Writes string +}{ + "pages_written", + "writes", +} + +// AttributeHandler are the possible values that the attribute "handler" can have. +var AttributeHandler = struct { + Commit string + Delete string + Discover string + ExternalLock string + MrrInit string + Prepare string + ReadFirst string + ReadKey string + ReadLast string + ReadNext string + ReadPrev string + ReadRnd string + ReadRndNext string + Rollback string + Savepoint string + SavepointRollback string + Update string + Write string +}{ + "commit", + "delete", + "discover", + "external_lock", + "mrr_init", + "prepare", + "read_first", + "read_key", + "read_last", + "read_next", + "read_prev", + "read_rnd", + "read_rnd_next", + "rollback", + "savepoint", + "savepoint_rollback", + "update", + "write", +} + +// AttributeLocks are the possible values that the attribute "locks" can have. +var AttributeLocks = struct { + Immediate string + Waited string +}{ + "immediate", + "waited", +} + +// AttributeLogOperations are the possible values that the attribute "log_operations" can have. +var AttributeLogOperations = struct { + Waits string + WriteRequests string + Writes string +}{ + "waits", + "write_requests", + "writes", +} + +// AttributeOperations are the possible values that the attribute "operations" can have. 
+var AttributeOperations = struct { + Fsyncs string + Reads string + Writes string +}{ + "fsyncs", + "reads", + "writes", +} + +// AttributePageOperations are the possible values that the attribute "page_operations" can have. +var AttributePageOperations = struct { + Created string + Read string + Written string +}{ + "created", + "read", + "written", +} + +// AttributeRowLocks are the possible values that the attribute "row_locks" can have. +var AttributeRowLocks = struct { + Waits string + Time string +}{ + "waits", + "time", +} + +// AttributeRowOperations are the possible values that the attribute "row_operations" can have. +var AttributeRowOperations = struct { + Deleted string + Inserted string + Read string + Updated string +}{ + "deleted", + "inserted", + "read", + "updated", +} + +// AttributeSorts are the possible values that the attribute "sorts" can have. +var AttributeSorts = struct { + MergePasses string + Range string + Rows string + Scan string +}{ + "merge_passes", + "range", + "rows", + "scan", +} + +// AttributeThreads are the possible values that the attribute "threads" can have. +var AttributeThreads = struct { + Cached string + Connected string + Created string + Running string +}{ + "cached", + "connected", + "created", + "running", +} diff --git a/receiver/mysqlreceiver/scraper.go b/receiver/mysqlreceiver/scraper.go index b0c4e8577cd58..8ac31d91fd8e3 100644 --- a/receiver/mysqlreceiver/scraper.go +++ b/receiver/mysqlreceiver/scraper.go @@ -31,6 +31,7 @@ type mySQLScraper struct { sqlclient client logger *zap.Logger config *Config + mb *metadata.MetricsBuilder } func newMySQLScraper( @@ -40,6 +41,7 @@ func newMySQLScraper( return &mySQLScraper{ logger: logger, config: config, + mb: metadata.NewMetricsBuilder(config.Metrics), } } @@ -64,33 +66,6 @@ func (m *mySQLScraper) shutdown(context.Context) error { return m.sqlclient.Close() } -// initMetric initializes a metric with a metadata label. 
-func initMetric(ms pdata.MetricSlice, mi metadata.MetricIntf) pdata.Metric { - m := ms.AppendEmpty() - mi.Init(m) - return m -} - -// addToDoubleMetric adds and labels a double gauge datapoint to a metricslice. -func addToDoubleMetric(metric pdata.NumberDataPointSlice, labels pdata.AttributeMap, value float64, ts pdata.Timestamp) { - dataPoint := metric.AppendEmpty() - dataPoint.SetTimestamp(ts) - dataPoint.SetDoubleVal(value) - if labels.Len() > 0 { - labels.CopyTo(dataPoint.Attributes()) - } -} - -// addToIntMetric adds and labels a int sum datapoint to metricslice. -func addToIntMetric(metric pdata.NumberDataPointSlice, labels pdata.AttributeMap, value int64, ts pdata.Timestamp) { - dataPoint := metric.AppendEmpty() - dataPoint.SetTimestamp(ts) - dataPoint.SetIntVal(value) - if labels.Len() > 0 { - labels.CopyTo(dataPoint.Attributes()) - } -} - // scrape scrapes the mysql db metric stats, transforms them and labels them into a metric slices. func (m *mySQLScraper) scrape(context.Context) (pdata.Metrics, error) { if m.sqlclient == nil { @@ -103,21 +78,6 @@ func (m *mySQLScraper) scrape(context.Context) (pdata.Metrics, error) { ilm.InstrumentationLibrary().SetName("otel/mysql") now := pdata.NewTimestampFromTime(time.Now()) - bufferPoolPages := initMetric(ilm.Metrics(), metadata.M.MysqlBufferPoolPages).Sum().DataPoints() - bufferPoolOperations := initMetric(ilm.Metrics(), metadata.M.MysqlBufferPoolOperations).Sum().DataPoints() - bufferPoolSize := initMetric(ilm.Metrics(), metadata.M.MysqlBufferPoolSize).Sum().DataPoints() - commands := initMetric(ilm.Metrics(), metadata.M.MysqlCommands).Sum().DataPoints() - handlers := initMetric(ilm.Metrics(), metadata.M.MysqlHandlers).Sum().DataPoints() - doubleWrites := initMetric(ilm.Metrics(), metadata.M.MysqlDoubleWrites).Sum().DataPoints() - logOperations := initMetric(ilm.Metrics(), metadata.M.MysqlLogOperations).Sum().DataPoints() - operations := initMetric(ilm.Metrics(), metadata.M.MysqlOperations).Sum().DataPoints() - 
pageOperations := initMetric(ilm.Metrics(), metadata.M.MysqlPageOperations).Sum().DataPoints() - rowLocks := initMetric(ilm.Metrics(), metadata.M.MysqlRowLocks).Sum().DataPoints() - rowOperations := initMetric(ilm.Metrics(), metadata.M.MysqlRowOperations).Sum().DataPoints() - locks := initMetric(ilm.Metrics(), metadata.M.MysqlLocks).Sum().DataPoints() - sorts := initMetric(ilm.Metrics(), metadata.M.MysqlSorts).Sum().DataPoints() - threads := initMetric(ilm.Metrics(), metadata.M.MysqlThreads).Sum().DataPoints() - // collect innodb metrics. innodbStats, err := m.sqlclient.getInnodbStats() if err != nil { @@ -128,10 +88,8 @@ func (m *mySQLScraper) scrape(context.Context) (pdata.Metrics, error) { if k != "buffer_pool_size" { continue } - labels := pdata.NewAttributeMap() if f, ok := m.parseFloat(k, v); ok { - labels.Insert(metadata.A.BufferPoolSize, pdata.NewAttributeValueString("total")) - addToDoubleMetric(bufferPoolSize, labels, f, now) + m.mb.RecordMysqlBufferPoolSizeDataPoint(now, f, "total") } } @@ -143,368 +101,303 @@ func (m *mySQLScraper) scrape(context.Context) (pdata.Metrics, error) { } for k, v := range globalStats { - labels := pdata.NewAttributeMap() switch k { // buffer_pool_pages case "Innodb_buffer_pool_pages_data": if f, ok := m.parseFloat(k, v); ok { - labels.Insert(metadata.A.BufferPoolPages, pdata.NewAttributeValueString("data")) - addToDoubleMetric(bufferPoolPages, labels, f, now) + m.mb.RecordMysqlBufferPoolPagesDataPoint(now, f, "data") } case "Innodb_buffer_pool_pages_dirty": if f, ok := m.parseFloat(k, v); ok { - labels.Insert(metadata.A.BufferPoolPages, pdata.NewAttributeValueString("dirty")) - addToDoubleMetric(bufferPoolPages, labels, f, now) + m.mb.RecordMysqlBufferPoolPagesDataPoint(now, f, "dirty") } case "Innodb_buffer_pool_pages_flushed": if f, ok := m.parseFloat(k, v); ok { - labels.Insert(metadata.A.BufferPoolPages, pdata.NewAttributeValueString("flushed")) - addToDoubleMetric(bufferPoolPages, labels, f, now) + 
m.mb.RecordMysqlBufferPoolPagesDataPoint(now, f, "flushed") } case "Innodb_buffer_pool_pages_free": if f, ok := m.parseFloat(k, v); ok { - labels.Insert(metadata.A.BufferPoolPages, pdata.NewAttributeValueString("free")) - addToDoubleMetric(bufferPoolPages, labels, f, now) + m.mb.RecordMysqlBufferPoolPagesDataPoint(now, f, "free") } case "Innodb_buffer_pool_pages_misc": if f, ok := m.parseFloat(k, v); ok { - labels.Insert(metadata.A.BufferPoolPages, pdata.NewAttributeValueString("misc")) - addToDoubleMetric(bufferPoolPages, labels, f, now) + m.mb.RecordMysqlBufferPoolPagesDataPoint(now, f, "misc") } case "Innodb_buffer_pool_pages_total": if f, ok := m.parseFloat(k, v); ok { - labels.Insert(metadata.A.BufferPoolPages, pdata.NewAttributeValueString("total")) - addToDoubleMetric(bufferPoolPages, labels, f, now) + m.mb.RecordMysqlBufferPoolPagesDataPoint(now, f, "total") } // buffer_pool_operations case "Innodb_buffer_pool_read_ahead_rnd": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.BufferPoolOperations, pdata.NewAttributeValueString("read_ahead_rnd")) - addToIntMetric(bufferPoolOperations, labels, i, now) + m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, i, "read_ahead_rnd") } case "Innodb_buffer_pool_read_ahead": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.BufferPoolOperations, pdata.NewAttributeValueString("read_ahead")) - addToIntMetric(bufferPoolOperations, labels, i, now) + m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, i, "read_ahead") } case "Innodb_buffer_pool_read_ahead_evicted": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.BufferPoolOperations, pdata.NewAttributeValueString("read_ahead_evicted")) - addToIntMetric(bufferPoolOperations, labels, i, now) + m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, i, "read_ahead_evicted") } case "Innodb_buffer_pool_read_requests": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.BufferPoolOperations, pdata.NewAttributeValueString("read_requests")) - 
addToIntMetric(bufferPoolOperations, labels, i, now) + m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, i, "read_requests") } case "Innodb_buffer_pool_reads": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.BufferPoolOperations, pdata.NewAttributeValueString("reads")) - addToIntMetric(bufferPoolOperations, labels, i, now) + m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, i, "reads") } case "Innodb_buffer_pool_wait_free": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.BufferPoolOperations, pdata.NewAttributeValueString("wait_free")) - addToIntMetric(bufferPoolOperations, labels, i, now) + m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, i, "wait_free") } case "Innodb_buffer_pool_write_requests": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.BufferPoolOperations, pdata.NewAttributeValueString("write_requests")) - addToIntMetric(bufferPoolOperations, labels, i, now) + m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, i, "write_requests") } // buffer_pool_size case "Innodb_buffer_pool_bytes_data": if f, ok := m.parseFloat(k, v); ok { - labels.Insert(metadata.A.BufferPoolSize, pdata.NewAttributeValueString("data")) - addToDoubleMetric(bufferPoolSize, labels, f, now) + m.mb.RecordMysqlBufferPoolSizeDataPoint(now, f, "data") } case "Innodb_buffer_pool_bytes_dirty": if f, ok := m.parseFloat(k, v); ok { - labels.Insert(metadata.A.BufferPoolSize, pdata.NewAttributeValueString("dirty")) - addToDoubleMetric(bufferPoolSize, labels, f, now) + m.mb.RecordMysqlBufferPoolSizeDataPoint(now, f, "dirty") } // commands case "Com_stmt_execute": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Command, pdata.NewAttributeValueString("execute")) - addToIntMetric(commands, labels, i, now) + m.mb.RecordMysqlCommandsDataPoint(now, i, "execute") } case "Com_stmt_close": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Command, pdata.NewAttributeValueString("close")) - addToIntMetric(commands, labels, i, now) + 
m.mb.RecordMysqlCommandsDataPoint(now, i, "close") } case "Com_stmt_fetch": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Command, pdata.NewAttributeValueString("fetch")) - addToIntMetric(commands, labels, i, now) + m.mb.RecordMysqlCommandsDataPoint(now, i, "fetch") } case "Com_stmt_prepare": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Command, pdata.NewAttributeValueString("prepare")) - addToIntMetric(commands, labels, i, now) + m.mb.RecordMysqlCommandsDataPoint(now, i, "prepare") } case "Com_stmt_reset": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Command, pdata.NewAttributeValueString("reset")) - addToIntMetric(commands, labels, i, now) + m.mb.RecordMysqlCommandsDataPoint(now, i, "reset") } case "Com_stmt_send_long_data": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Command, pdata.NewAttributeValueString("send_long_data")) - addToIntMetric(commands, labels, i, now) + m.mb.RecordMysqlCommandsDataPoint(now, i, "send_long_data") } // handlers case "Handler_commit": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Handler, pdata.NewAttributeValueString("commit")) - addToIntMetric(handlers, labels, i, now) + m.mb.RecordMysqlHandlersDataPoint(now, i, "commit") } case "Handler_delete": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Handler, pdata.NewAttributeValueString("delete")) - addToIntMetric(handlers, labels, i, now) + m.mb.RecordMysqlHandlersDataPoint(now, i, "delete") } case "Handler_discover": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Handler, pdata.NewAttributeValueString("discover")) - addToIntMetric(handlers, labels, i, now) + m.mb.RecordMysqlHandlersDataPoint(now, i, "discover") } case "Handler_external_lock": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Handler, pdata.NewAttributeValueString("lock")) - addToIntMetric(handlers, labels, i, now) + m.mb.RecordMysqlHandlersDataPoint(now, i, "lock") } case "Handler_mrr_init": if 
i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Handler, pdata.NewAttributeValueString("mrr_init")) - addToIntMetric(handlers, labels, i, now) + m.mb.RecordMysqlHandlersDataPoint(now, i, "mrr_init") } case "Handler_prepare": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Handler, pdata.NewAttributeValueString("prepare")) - addToIntMetric(handlers, labels, i, now) + m.mb.RecordMysqlHandlersDataPoint(now, i, "prepare") } case "Handler_read_first": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Handler, pdata.NewAttributeValueString("read_first")) - addToIntMetric(handlers, labels, i, now) + m.mb.RecordMysqlHandlersDataPoint(now, i, "read_first") } case "Handler_read_key": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Handler, pdata.NewAttributeValueString("read_key")) - addToIntMetric(handlers, labels, i, now) + m.mb.RecordMysqlHandlersDataPoint(now, i, "read_key") } case "Handler_read_last": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Handler, pdata.NewAttributeValueString("read_last")) - addToIntMetric(handlers, labels, i, now) + m.mb.RecordMysqlHandlersDataPoint(now, i, "read_last") } case "Handler_read_next": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Handler, pdata.NewAttributeValueString("read_next")) - addToIntMetric(handlers, labels, i, now) + m.mb.RecordMysqlHandlersDataPoint(now, i, "read_next") } case "Handler_read_prev": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Handler, pdata.NewAttributeValueString("read_prev")) - addToIntMetric(handlers, labels, i, now) + m.mb.RecordMysqlHandlersDataPoint(now, i, "read_prev") } case "Handler_read_rnd": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Handler, pdata.NewAttributeValueString("read_rnd")) - addToIntMetric(handlers, labels, i, now) + m.mb.RecordMysqlHandlersDataPoint(now, i, "read_rnd") } case "Handler_read_rnd_next": if i, ok := m.parseInt(k, v); ok { - 
labels.Insert(metadata.A.Handler, pdata.NewAttributeValueString("read_rnd_next")) - addToIntMetric(handlers, labels, i, now) + m.mb.RecordMysqlHandlersDataPoint(now, i, "read_rnd_next") } case "Handler_rollback": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Handler, pdata.NewAttributeValueString("rollback")) - addToIntMetric(handlers, labels, i, now) + m.mb.RecordMysqlHandlersDataPoint(now, i, "rollback") } case "Handler_savepoint": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Handler, pdata.NewAttributeValueString("savepoint")) - addToIntMetric(handlers, labels, i, now) + m.mb.RecordMysqlHandlersDataPoint(now, i, "savepoint") } case "Handler_savepoint_rollback": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Handler, pdata.NewAttributeValueString("savepoint_rollback")) - addToIntMetric(handlers, labels, i, now) + m.mb.RecordMysqlHandlersDataPoint(now, i, "savepoint_rollback") } case "Handler_update": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Handler, pdata.NewAttributeValueString("update")) - addToIntMetric(handlers, labels, i, now) + m.mb.RecordMysqlHandlersDataPoint(now, i, "update") } case "Handler_write": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Handler, pdata.NewAttributeValueString("write")) - addToIntMetric(handlers, labels, i, now) + m.mb.RecordMysqlHandlersDataPoint(now, i, "write") } // double_writes case "Innodb_dblwr_pages_written": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.DoubleWrites, pdata.NewAttributeValueString("written")) - addToIntMetric(doubleWrites, labels, i, now) + m.mb.RecordMysqlDoubleWritesDataPoint(now, i, "written") } case "Innodb_dblwr_writes": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.DoubleWrites, pdata.NewAttributeValueString("writes")) - addToIntMetric(doubleWrites, labels, i, now) + m.mb.RecordMysqlDoubleWritesDataPoint(now, i, "writes") } // log_operations case "Innodb_log_waits": if i, ok := 
m.parseInt(k, v); ok { - labels.Insert(metadata.A.LogOperations, pdata.NewAttributeValueString("waits")) - addToIntMetric(logOperations, labels, i, now) + m.mb.RecordMysqlLogOperationsDataPoint(now, i, "waits") } case "Innodb_log_write_requests": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.LogOperations, pdata.NewAttributeValueString("requests")) - addToIntMetric(logOperations, labels, i, now) + m.mb.RecordMysqlLogOperationsDataPoint(now, i, "requests") } case "Innodb_log_writes": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.LogOperations, pdata.NewAttributeValueString("writes")) - addToIntMetric(logOperations, labels, i, now) + m.mb.RecordMysqlLogOperationsDataPoint(now, i, "writes") } // operations case "Innodb_data_fsyncs": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Operations, pdata.NewAttributeValueString("fsyncs")) - addToIntMetric(operations, labels, i, now) + m.mb.RecordMysqlOperationsDataPoint(now, i, "fsyncs") } case "Innodb_data_reads": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Operations, pdata.NewAttributeValueString("reads")) - addToIntMetric(operations, labels, i, now) + m.mb.RecordMysqlOperationsDataPoint(now, i, "reads") } case "Innodb_data_writes": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Operations, pdata.NewAttributeValueString("writes")) - addToIntMetric(operations, labels, i, now) + m.mb.RecordMysqlOperationsDataPoint(now, i, "writes") } // page_operations case "Innodb_pages_created": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.PageOperations, pdata.NewAttributeValueString("created")) - addToIntMetric(pageOperations, labels, i, now) + m.mb.RecordMysqlPageOperationsDataPoint(now, i, "created") } case "Innodb_pages_read": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.PageOperations, pdata.NewAttributeValueString("read")) - addToIntMetric(pageOperations, labels, i, now) + m.mb.RecordMysqlPageOperationsDataPoint(now, i, 
"read") } case "Innodb_pages_written": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.PageOperations, pdata.NewAttributeValueString("written")) - addToIntMetric(pageOperations, labels, i, now) + m.mb.RecordMysqlPageOperationsDataPoint(now, i, "written") } // row_locks case "Innodb_row_lock_waits": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.RowLocks, pdata.NewAttributeValueString("waits")) - addToIntMetric(rowLocks, labels, i, now) + m.mb.RecordMysqlRowLocksDataPoint(now, i, "waits") } case "Innodb_row_lock_time": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.RowLocks, pdata.NewAttributeValueString("time")) - addToIntMetric(rowLocks, labels, i, now) + m.mb.RecordMysqlRowLocksDataPoint(now, i, "time") } // row_operations case "Innodb_rows_deleted": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.RowOperations, pdata.NewAttributeValueString("deleted")) - addToIntMetric(rowOperations, labels, i, now) + m.mb.RecordMysqlRowOperationsDataPoint(now, i, "deleted") } case "Innodb_rows_inserted": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.RowOperations, pdata.NewAttributeValueString("inserted")) - addToIntMetric(rowOperations, labels, i, now) + m.mb.RecordMysqlRowOperationsDataPoint(now, i, "inserted") } case "Innodb_rows_read": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.RowOperations, pdata.NewAttributeValueString("read")) - addToIntMetric(rowOperations, labels, i, now) + m.mb.RecordMysqlRowOperationsDataPoint(now, i, "read") } case "Innodb_rows_updated": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.RowOperations, pdata.NewAttributeValueString("updated")) - addToIntMetric(rowOperations, labels, i, now) + m.mb.RecordMysqlRowOperationsDataPoint(now, i, "updated") } // locks case "Table_locks_immediate": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Locks, pdata.NewAttributeValueString("immediate")) - addToIntMetric(locks, labels, i, now) + 
m.mb.RecordMysqlLocksDataPoint(now, i, "immediate") } case "Table_locks_waited": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Locks, pdata.NewAttributeValueString("waited")) - addToIntMetric(locks, labels, i, now) + m.mb.RecordMysqlLocksDataPoint(now, i, "waited") } // sorts case "Sort_merge_passes": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Sorts, pdata.NewAttributeValueString("merge_passes")) - addToIntMetric(sorts, labels, i, now) + m.mb.RecordMysqlSortsDataPoint(now, i, "merge_passes") } case "Sort_range": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Sorts, pdata.NewAttributeValueString("range")) - addToIntMetric(sorts, labels, i, now) + m.mb.RecordMysqlSortsDataPoint(now, i, "range") } case "Sort_rows": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Sorts, pdata.NewAttributeValueString("rows")) - addToIntMetric(sorts, labels, i, now) + m.mb.RecordMysqlSortsDataPoint(now, i, "rows") } case "Sort_scan": if i, ok := m.parseInt(k, v); ok { - labels.Insert(metadata.A.Sorts, pdata.NewAttributeValueString("scan")) - addToIntMetric(sorts, labels, i, now) + m.mb.RecordMysqlSortsDataPoint(now, i, "scan") } // threads case "Threads_cached": if f, ok := m.parseFloat(k, v); ok { - labels.Insert(metadata.A.Threads, pdata.NewAttributeValueString("cached")) - addToDoubleMetric(threads, labels, f, now) + m.mb.RecordMysqlThreadsDataPoint(now, f, "cached") } case "Threads_connected": if f, ok := m.parseFloat(k, v); ok { - labels.Insert(metadata.A.Threads, pdata.NewAttributeValueString("connected")) - addToDoubleMetric(threads, labels, f, now) + m.mb.RecordMysqlThreadsDataPoint(now, f, "connected") } case "Threads_created": if f, ok := m.parseFloat(k, v); ok { - labels.Insert(metadata.A.Threads, pdata.NewAttributeValueString("created")) - addToDoubleMetric(threads, labels, f, now) + m.mb.RecordMysqlThreadsDataPoint(now, f, "created") } case "Threads_running": if f, ok := m.parseFloat(k, v); ok { - 
labels.Insert(metadata.A.Threads, pdata.NewAttributeValueString("running")) - addToDoubleMetric(threads, labels, f, now) + m.mb.RecordMysqlThreadsDataPoint(now, f, "running") } } } + + m.mb.Emit(ilm.Metrics()) return md, nil } diff --git a/receiver/mysqlreceiver/scraper_test.go b/receiver/mysqlreceiver/scraper_test.go index ca84446d7bd5d..9b1e7c7bba252 100644 --- a/receiver/mysqlreceiver/scraper_test.go +++ b/receiver/mysqlreceiver/scraper_test.go @@ -32,13 +32,10 @@ import ( ) func TestScrape(t *testing.T) { - cfg := &Config{ - Username: "otel", - Password: "otel", - NetAddr: confignet.NetAddr{ - Endpoint: "localhost:3306", - }, - } + cfg := createDefaultConfig().(*Config) + cfg.Username = "otel" + cfg.Password = "otel" + cfg.NetAddr = confignet.NetAddr{Endpoint: "localhost:3306"} scraper := newMySQLScraper(zap.NewNop(), cfg) scraper.sqlclient = &mockClient{}