
Commit

[chore] Enable lint for integration test files and fix issues (open-telemetry#17992)

[chore] Lint integration test files and fix issues
djaglowski committed Jan 24, 2023
1 parent a9b315a commit 5b2be2a
Showing 13 changed files with 73 additions and 52 deletions.
2 changes: 1 addition & 1 deletion Makefile.Common
@@ -162,7 +162,7 @@ fmt: $(GOIMPORTS)

.PHONY: lint
lint: $(LINT) checklicense misspell
$(LINT) run --allow-parallel-runners
$(LINT) run --allow-parallel-runners --build-tags integration

.PHONY: tidy
tidy:
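Note: the --build-tags integration flag matters because golangci-lint only analyzes files whose build constraints are satisfied, so files guarded by the integration tag (like the ones changed below) were previously skipped by lint entirely. A minimal sketch of such a gated test file, using a hypothetical package and test name:

//go:build integration
// +build integration

package sample

import "testing"

// Compiled, tested, and linted only when the "integration" tag is supplied,
// e.g. `go test -tags integration ./...` or `golangci-lint run --build-tags integration`.
func TestSomethingIntegration(t *testing.T) {
	t.Log("runs only with the integration build tag")
}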
4 changes: 2 additions & 2 deletions extension/observer/dockerobserver/integration_test.go
@@ -103,7 +103,7 @@ func TestObserverUpdatesEndpointsIntegration(t *testing.T) {
})
require.Nil(t, err)
defer func() {
err := container.Terminate(ctx)
err = container.Terminate(ctx)
require.Nil(t, err)
}()
require.NotNil(t, container)
@@ -124,7 +124,7 @@ func TestObserverUpdatesEndpointsIntegration(t *testing.T) {
tcDockerClient, _, _, err := testcontainers.NewDockerClient()
require.Nil(t, err)

tcDockerClient.ContainerRename(context.Background(), container.GetContainerID(), "nginx-updated")
require.NoError(t, tcDockerClient.ContainerRename(context.Background(), container.GetContainerID(), "nginx-updated"))

require.Eventually(t, func() bool { return mn.ChangeCount() == 1 }, 3*time.Second, 10*time.Millisecond)
require.Equal(t, 1, mn.AddCount())
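Note: the two fixes above show the pattern applied throughout this commit: assign to the existing err instead of shadowing it inside the deferred cleanup, and wrap calls whose error used to be discarded (here ContainerRename) in a testify assertion so errcheck sees the result handled. A self-contained sketch of the idea, with a hypothetical renameContainer helper standing in for the Docker client call:

package sample

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
)

// renameContainer stands in for a client call that returns only an error.
func renameContainer(ctx context.Context, id, name string) error { return nil }

func TestRenameIsChecked(t *testing.T) {
	// Before: renameContainer(...) was called with its error dropped, which errcheck flags.
	// After: the error is asserted, so a failure surfaces in the test output.
	require.NoError(t, renameContainer(context.Background(), "abc123", "nginx-updated"))
}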
16 changes: 8 additions & 8 deletions receiver/aerospikereceiver/integration_test.go
@@ -125,7 +125,7 @@ func populateMetrics(t *testing.T, host *as.Host) {
queryPolicyShort := as.NewQueryPolicy()
queryPolicyShort.ShortQuery = true

var writePolicy *as.WritePolicy = nil
var writePolicy *as.WritePolicy

// *** Primary Index Queries *** //

@@ -175,28 +175,28 @@ func populateMetrics(t *testing.T, host *as.Host) {

// perform a basic secondary index query
s6 := as.NewStatement(ns, set)
s6.SetFilter(filt)
require.NoError(t, s6.SetFilter(filt))
RecordsWaitAndCheck(func() (RecordsCheckable, as.Error) {
return c.Query(queryPolicy, s6)
}, t)

// aggregation query on secondary index
s7 := as.NewStatement(ns, set)
s7.SetFilter(filt)
require.NoError(t, s7.SetFilter(filt))
RecordsWaitAndCheck(func() (RecordsCheckable, as.Error) {
return c.QueryAggregate(queryPolicy, s7, "/"+udfFile, udfFunc, as.StringValue(sibin))
}, t)

// background udf query on secondary index
s8 := as.NewStatement(ns, set)
s8.SetFilter(filt)
require.NoError(t, s8.SetFilter(filt))
doneWaitAndCheck(func() (doneCheckable, as.Error) {
return c.ExecuteUDF(queryPolicy, s8, "/"+udfFile, udfFunc, as.StringValue(sibin))
}, t)

// ops query on secondary index
s9 := as.NewStatement(ns, set)
s9.SetFilter(filt)
require.NoError(t, s9.SetFilter(filt))
siwbin := as.NewBin("bin4", 400)
siops := as.PutOp(siwbin)
doneWaitAndCheck(func() (doneCheckable, as.Error) {
@@ -205,7 +205,7 @@ func populateMetrics(t *testing.T, host *as.Host) {

// perform a basic short secondary index query
s10 := as.NewStatement(ns, set)
s10.SetFilter(filt)
require.NoError(t, s10.SetFilter(filt))
RecordsWaitAndCheck(func() (RecordsCheckable, as.Error) {
return c.Query(queryPolicyShort, s10)
}, t)
@@ -248,7 +248,7 @@ func populateMetrics(t *testing.T, host *as.Host) {
geoSet := "geoset"
for i, b := range bins {
key, _ := as.NewKey(ns, geoSet, i)
err := c.Put(nil, key, b)
err = c.Put(nil, key, b)
require.NoError(t, err, "failed to write geojson record")
}

@@ -260,7 +260,7 @@ func populateMetrics(t *testing.T, host *as.Host) {
// run geoJSON query
geoStm1 := as.NewStatement(ns, geoSet)
geoFilt1 := as.NewGeoWithinRadiusFilter("coord", float64(13.009318762), float64(80.003157854), float64(50000))
geoStm1.SetFilter(geoFilt1)
require.NoError(t, geoStm1.SetFilter(geoFilt1))
RecordsWaitAndCheck(func() (RecordsCheckable, as.Error) {
return c.Query(queryPolicy, geoStm1)
}, t)
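Note: two recurring cleanups in this file: var writePolicy *as.WritePolicy = nil loses the explicit nil, since a declared pointer already has the zero value nil and the redundant initializer is what the linter objects to (which rule exactly is an assumption), and every SetFilter call is wrapped in require.NoError so its returned error is no longer ignored. A tiny illustration of the zero-value point:

package sample

import "fmt"

type WritePolicy struct{ MaxRetries int }

func main() {
	// Redundant form: `var wp *WritePolicy = nil` restates the zero value.
	// Idiomatic form: the plain declaration below is already nil.
	var wp *WritePolicy
	fmt.Println(wp == nil) // prints true
}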
3 changes: 3 additions & 0 deletions receiver/bigipreceiver/integration_test.go
@@ -12,6 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build integration
// +build integration

package bigipreceiver

import (
10 changes: 5 additions & 5 deletions receiver/elasticsearchreceiver/integration_test.go
@@ -62,7 +62,7 @@ func TestElasticsearchIntegration(t *testing.T) {
err := featuregate.GlobalRegistry().Apply(map[string]bool{emitClusterHealthDetailedShardMetricsID: false, emitAllIndexOperationMetricsID: false})
require.NoError(t, err)

//Starts an elasticsearch docker container
// Starts an elasticsearch docker container
t.Run("Running elasticsearch 7.9", func(t *testing.T) {
t.Parallel()
container := getContainer(t, containerRequest7_9_3)
@@ -87,13 +87,13 @@ func TestElasticsearchIntegration(t *testing.T) {
}, 2*time.Minute, 1*time.Second, "failed to receive more than 0 metrics")
require.NoError(t, rcvr.Shutdown(context.Background()))

actualMtrics := consumer.AllMetrics()[0]
actualMetrics := consumer.AllMetrics()[0]

expectedFile := filepath.Join("testdata", "integration", "expected.7_9_3.json")
expectedMetrics, err := golden.ReadMetrics(expectedFile)
require.NoError(t, err)

pmetrictest.CompareMetrics(expectedMetrics, actualMtrics,
pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, //nolint:errcheck
pmetrictest.IgnoreResourceMetricsOrder(),
pmetrictest.IgnoreMetricValues(),
pmetrictest.IgnoreResourceAttributeValue("elasticsearch.node.name"))
@@ -122,13 +122,13 @@ func TestElasticsearchIntegration(t *testing.T) {
}, 2*time.Minute, 1*time.Second, "failed to receive more than 0 metrics")
require.NoError(t, rcvr.Shutdown(context.Background()))

actualMtrics := consumer.AllMetrics()[0]
actualMetrics := consumer.AllMetrics()[0]

expectedFile := filepath.Join("testdata", "integration", "expected.7_16_3.json")
expectedMetrics, err := golden.ReadMetrics(expectedFile)
require.NoError(t, err)

pmetrictest.CompareMetrics(expectedMetrics, actualMtrics,
pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, //nolint:errcheck
pmetrictest.IgnoreResourceMetricsOrder(),
pmetrictest.IgnoreMetricValues(),
pmetrictest.IgnoreResourceAttributeValue("elasticsearch.node.name"))
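Note: here the pmetrictest.CompareMetrics result is still unchecked, so the inline //nolint:errcheck directive suppresses the finding for that one statement rather than handling the error (contrast with the rabbitmq change further down, which asserts it). A minimal sketch of how such a line-scoped directive behaves, using an unrelated standard-library call purely for illustration:

package sample

import "strconv"

func example() int {
	// The directive silences errcheck for this statement only; any other
	// unchecked error return in the package is still reported.
	strconv.Atoi("not-a-number") //nolint:errcheck

	n, err := strconv.Atoi("42")
	if err != nil {
		return 0
	}
	return n
}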
18 changes: 12 additions & 6 deletions receiver/jmxreceiver/integration_test.go
@@ -23,7 +23,6 @@ import (
"io"
"net/http"
"os"
"path/filepath"
"testing"
"time"

@@ -70,7 +69,7 @@ func (suite *JMXIntegrationSuite) TearDownSuite() {
}

func downloadJMXMetricGathererJAR(url string) (string, error) {
resp, err := http.Get(url)
resp, err := http.Get(url) //nolint:gosec
if err != nil {
return "", err
}
@@ -90,7 +89,7 @@ func cassandraContainer(t *testing.T) testcontainers.Container {
ctx := context.Background()
req := testcontainers.ContainerRequest{
FromDockerfile: testcontainers.FromDockerfile{
Context: filepath.Join("testdata"),
Context: "testdata",
Dockerfile: "Dockerfile.cassandra",
},
ExposedPorts: []string{"7199:7199"},
@@ -133,16 +132,23 @@ func getLogsOnFailure(t *testing.T, logObserver *observer.ObservedLogs) {
}
}

// Workaround to avoid unused errors
var skip = func(t *testing.T, why string) {
t.Skip(why)
}

func (suite *JMXIntegrationSuite) TestJMXReceiverHappyPath() {

for version, jar := range suite.VersionToJar {
t := suite.T()
// Run one test per JMX receiver version we're integrating with.
t.Run(version, func(t *testing.T) {
t.Skip("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5874")
skip(t, "https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5874")

cassandra := cassandraContainer(t)
defer cassandra.Terminate(context.Background())
defer func() {
require.NoError(t, cassandra.Terminate(context.Background()))
}()
hostname, err := cassandra.Host(context.Background())
require.NoError(t, err)

@@ -242,7 +248,7 @@ func TestJMXReceiverInvalidOTLPEndpointIntegration(t *testing.T) {
params := receivertest.NewNopCreateSettings()
cfg := &Config{
CollectionInterval: 100 * time.Millisecond,
Endpoint: fmt.Sprintf("service:jmx:rmi:https:///jndi/rmi:https://localhost:7199/jmxrmi"),
Endpoint: "service:jmx:rmi:https:///jndi/rmi:https://localhost:7199/jmxrmi",
JARPath: "/notavalidpath",
TargetSystem: "jvm",
OTLPExporterConfig: otlpExporterConfig{
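Note: two changes of interest in this file: http.Get(url) gains //nolint:gosec because gosec flags HTTP requests whose URL comes from a variable (the exact rule ID is an assumption), and the small skip wrapper is described in the diff as a workaround to avoid unused errors, presumably so the linter does not treat the code after the unconditional t.Skip as unused. A hedged, simplified sketch of the download helper shape:

package sample

import (
	"io"
	"net/http"
	"os"
)

// downloadToTempFile is a hypothetical stand-in for the JAR download helper above.
// Because url is a variable, gosec would flag the http.Get call, hence the directive.
func downloadToTempFile(url string) (string, error) {
	resp, err := http.Get(url) //nolint:gosec
	if err != nil {
		return "", err
	}
	defer func() { _ = resp.Body.Close() }()

	f, err := os.CreateTemp("", "download-*.jar")
	if err != nil {
		return "", err
	}
	defer func() { _ = f.Close() }()

	if _, err := io.Copy(f, resp.Body); err != nil {
		return "", err
	}
	return f.Name(), nil
}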
48 changes: 32 additions & 16 deletions receiver/jmxreceiver/internal/subprocess/integration_test.go
@@ -129,8 +129,10 @@ func (suite *SubprocessIntegrationSuite) TestHappyPath() {
defer cancel()

subprocess, procInfo, findProcessInfo := suite.prepareSubprocess(&Config{})
subprocess.Start(ctx)
defer subprocess.Shutdown(ctx)
assert.NoError(t, subprocess.Start(ctx))
defer func() {
assert.NoError(t, subprocess.Shutdown(ctx))
}()

assert.Eventually(t, findProcessInfo, 5*time.Second, 10*time.Millisecond)
require.NotNil(t, *procInfo)
@@ -146,8 +148,10 @@ func (suite *SubprocessIntegrationSuite) TestWithArgs() {
defer cancel()

subprocess, procInfo, findProcessInfo := suite.prepareSubprocess(&Config{Args: []string{"myArgs"}})
subprocess.Start(ctx)
defer subprocess.Shutdown(ctx)
assert.NoError(t, subprocess.Start(ctx))
defer func() {
assert.NoError(t, subprocess.Shutdown(ctx))
}()

require.Eventually(t, findProcessInfo, 5*time.Second, 10*time.Millisecond)
require.NotNil(t, *procInfo)
@@ -170,8 +174,10 @@ func (suite *SubprocessIntegrationSuite) TestWithEnvVars() {
}

subprocess, procInfo, findProcessInfo := suite.prepareSubprocess(config)
subprocess.Start(ctx)
defer subprocess.Shutdown(ctx)
assert.NoError(t, subprocess.Start(ctx))
defer func() {
assert.NoError(t, subprocess.Shutdown(ctx))
}()
require.Eventually(t, findProcessInfo, 5*time.Second, 10*time.Millisecond)
require.NotNil(t, *procInfo)

@@ -188,8 +194,10 @@ func (suite *SubprocessIntegrationSuite) TestWithAutoRestart() {

restartDelay := 100 * time.Millisecond
subprocess, procInfo, findProcessInfo := suite.prepareSubprocess(&Config{RestartOnError: true, RestartDelay: &restartDelay})
subprocess.Start(ctx)
defer subprocess.Shutdown(ctx)
assert.NoError(t, subprocess.Start(ctx))
defer func() {
assert.NoError(t, subprocess.Shutdown(ctx))
}()

require.Eventually(t, findProcessInfo, 5*time.Second, 10*time.Millisecond)
require.NotNil(t, *procInfo)
@@ -214,8 +222,10 @@ func (suite *SubprocessIntegrationSuite) TestSendingStdin() {
defer cancel()

subprocess, procInfo, findProcessInfo := suite.prepareSubprocess(&Config{StdInContents: "mystdincontents"})
subprocess.Start(ctx)
defer subprocess.Shutdown(ctx)
assert.NoError(t, subprocess.Start(ctx))
defer func() {
assert.NoError(t, subprocess.Shutdown(ctx))
}()

require.Eventually(t, findProcessInfo, 5*time.Second, 10*time.Millisecond)
require.NotNil(t, *procInfo)
@@ -238,8 +248,10 @@ func (suite *SubprocessIntegrationSuite) TestSendingStdinFails() {
return intentionalError
}

subprocess.Start(ctx)
defer subprocess.Shutdown(ctx)
assert.NoError(t, subprocess.Start(ctx))
defer func() {
assert.NoError(t, subprocess.Shutdown(ctx))
}()

matched := func() bool {
died := len(logObserver.FilterMessage("subprocess died").All()) == 1
@@ -259,8 +271,10 @@ func (suite *SubprocessIntegrationSuite) TestSubprocessBadExec() {
logger := zap.New(logCore)

subprocess := NewSubprocess(&Config{ExecutablePath: "/does/not/exist"}, logger)
subprocess.Start(ctx)
defer subprocess.Shutdown(ctx)
assert.NoError(t, subprocess.Start(ctx))
defer func() {
assert.NoError(t, subprocess.Shutdown(ctx))
}()

matched := func() bool {
return len(logObserver.FilterMessage("subprocess died").All()) == 1
@@ -278,8 +292,10 @@ func (suite *SubprocessIntegrationSuite) TestSubprocessSuccessfullyReturns() {
// process exit. Here we sleep before returning, but this will need to be addressed if short lived processes
// become an intended use case without forcing users to read stdout before closing.
subprocess := NewSubprocess(&Config{ExecutablePath: "sh", Args: []string{"-c", "echo finished; sleep .1"}}, zap.NewNop())
subprocess.Start(ctx)
defer subprocess.Shutdown(ctx)
assert.NoError(t, subprocess.Start(ctx))
defer func() {
assert.NoError(t, subprocess.Shutdown(ctx))
}()

matched := func() bool {
_, ok := <-subprocess.shutdownSignal
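Note: the recurring change in this file wraps Start and Shutdown in assertions, and the Shutdown check has to live inside a deferred closure: writing defer assert.NoError(t, subprocess.Shutdown(ctx)) would run Shutdown immediately, because the arguments of a deferred call are evaluated at the defer statement. A minimal sketch of the pattern with a hypothetical component type:

package sample

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
)

// component is a stand-in for the subprocess used in the tests above.
type component struct{}

func (c *component) Start(ctx context.Context) error    { return nil }
func (c *component) Shutdown(ctx context.Context) error { return nil }

func TestLifecycle(t *testing.T) {
	ctx := context.Background()
	c := &component{}

	assert.NoError(t, c.Start(ctx))
	// The closure defers both the Shutdown call and the assertion until the
	// test returns; without it, Shutdown would execute right here.
	defer func() {
		assert.NoError(t, c.Shutdown(ctx))
	}()
}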
4 changes: 2 additions & 2 deletions receiver/kafkametricsreceiver/kafkametrics_e2e_test.go
@@ -36,7 +36,7 @@ const (
zkPort = 2181
kafkaPort = 9092
kafkaZkImage = "johnnypark/kafka-zookeeper"
//only one metric, number of brokers, will be reported.
// only one metric, number of brokers, will be reported.
expectedMetrics = 1
)

@@ -64,7 +64,7 @@ func TestIntegrationSingleNode(t *testing.T) {
WaitingFor: wait.ForAll(
wait.ForListeningPort("2181/tcp").WithStartupTimeout(time.Minute*2),
wait.ForListeningPort("9092/tcp").WithStartupTimeout(time.Minute*2),
).WithStartupTimeout(time.Minute * 2),
).WithDeadline(time.Minute * 2),
}
container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
ContainerRequest: req,
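Note: the wait-strategy tweak swaps WithStartupTimeout for WithDeadline on the combined wait.ForAll. My reading (an assumption, the diff only shows the rename) is that the per-port WithStartupTimeout calls bound each individual wait while WithDeadline caps the combined strategy as a whole, which is the form testcontainers-go expects on the multi-strategy. The resulting shape, extracted from the test above:

package sample

import (
	"time"

	"github.com/testcontainers/testcontainers-go/wait"
)

// combinedWait mirrors the strategy used above: each port wait is bounded
// individually, and WithDeadline caps the combined strategy overall.
func combinedWait() wait.Strategy {
	return wait.ForAll(
		wait.ForListeningPort("2181/tcp").WithStartupTimeout(time.Minute*2),
		wait.ForListeningPort("9092/tcp").WithStartupTimeout(time.Minute*2),
	).WithDeadline(time.Minute * 2)
}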
2 changes: 1 addition & 1 deletion receiver/nginxreceiver/integration_test.go
@@ -87,5 +87,5 @@ func TestNginxIntegration(t *testing.T) {

actualMetrics := consumer.AllMetrics()[0]

pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, pmetrictest.IgnoreMetricValues())
pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, pmetrictest.IgnoreMetricValues()) //nolint:errcheck
}
2 changes: 1 addition & 1 deletion receiver/rabbitmqreceiver/integration_test.go
@@ -87,7 +87,7 @@ func TestRabbitmqIntegration(t *testing.T) {
expectedMetrics, err := golden.ReadMetrics(expectedFile)
require.NoError(t, err)

pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, pmetrictest.IgnoreMetricValues())
require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, pmetrictest.IgnoreMetricValues()))
})
}

2 changes: 1 addition & 1 deletion receiver/riakreceiver/integration_test.go
@@ -87,5 +87,5 @@ func TestRiakIntegration(t *testing.T) {

actualMetrics := consumer.AllMetrics()[0]

pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, pmetrictest.IgnoreMetricValues())
pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, pmetrictest.IgnoreMetricValues()) //nolint:errcheck
}
1 change: 1 addition & 0 deletions receiver/snmpreceiver/integration_test.go
@@ -69,6 +69,7 @@ func TestSnmpReceiverIntegration(t *testing.T) {
factories.Receivers[typeStr] = factory
configFile := filepath.Join("testdata", "integration", testCase.configFilename)
cfg, err := otelcoltest.LoadConfigAndValidate(configFile, factories)
require.NoError(t, err)
snmpConfig := cfg.Receivers[component.NewID(typeStr)].(*Config)

consumer := new(consumertest.MetricsSink)
13 changes: 4 additions & 9 deletions receiver/sqlqueryreceiver/integration_test.go
@@ -157,11 +157,6 @@ func TestPostgresIntegration(t *testing.T) {
testPGTypeMetrics(t, rms.At(1))
}

// workaround to avoid "unused" lint errors which test is skipped
var skip = func(t *testing.T, why string) {
t.Skip(why)
}

// This test ensures the collector can connect to an Oracle DB, and properly get metrics. It's not intended to
// test the receiver itself.
func TestOracleDBIntegration(t *testing.T) {
@@ -310,10 +305,10 @@ func testPGTypeMetrics(t *testing.T, rm pmetric.ResourceMetrics) {
}
}

func assertIntGaugeEquals(t *testing.T, expected int, metric pmetric.Metric) bool {
return assert.EqualValues(t, expected, metric.Gauge().DataPoints().At(0).IntValue())
func assertIntGaugeEquals(t *testing.T, expected int, metric pmetric.Metric) {
assert.EqualValues(t, expected, metric.Gauge().DataPoints().At(0).IntValue())
}

func assertDoubleGaugeEquals(t *testing.T, expected float64, metric pmetric.Metric) bool {
return assert.InDelta(t, expected, metric.Gauge().DataPoints().At(0).DoubleValue(), 0.1)
func assertDoubleGaugeEquals(t *testing.T, expected float64, metric pmetric.Metric) {
assert.InDelta(t, expected, metric.Gauge().DataPoints().At(0).DoubleValue(), 0.1)
}
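
Note: the two gauge helpers stop returning testify's bool, presumably because no caller used it, and with lint now covering these files the ignored result gets flagged (which specific linter reports it is an assumption). A minimal before-and-after sketch with a hypothetical integer helper:

package sample

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Before (mirroring the helpers above): returning testify's bool, which no caller used.
//
//	func assertIntEquals(t *testing.T, expected, actual int) bool {
//		return assert.EqualValues(t, expected, actual)
//	}

// After: drop the unused return; the assertion alone already fails the test.
func assertIntEquals(t *testing.T, expected, actual int) {
	assert.EqualValues(t, expected, actual)
}

func TestHelper(t *testing.T) {
	assertIntEquals(t, 3, 3)
}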
