[processor/spanmetrics] Fix flaky test (open-telemetry#18024)
* Fix flaky test from observed logs assertion

Signed-off-by: albertteoh <[email protected]>
albertteoh committed Jan 26, 2023
1 parent 049c2b4 commit 02aa38d
Showing 2 changed files with 34 additions and 11 deletions.
16 changes: 16 additions & 0 deletions .chloggen/spanmetricsprocessor-fix-flaky-test.yaml
@@ -0,0 +1,16 @@
# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: bug_fix

# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
component: spanmetricsprocessor

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Fix a flaky test caused by a race condition between WaitGroup completion and observed logs being written and flushed.

# One or more tracking issues related to the change
issues: [18014]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:
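
The change described in this note replaces a one-shot assertion (and, in one test, a fixed sleep) on the observed logs with a polling assertion. Below is a minimal, self-contained sketch of that pattern, assuming zap's observer core and testify's assert.Eventually; the test name, the worker goroutines, and the log message are illustrative stand-ins and do not reproduce the processor's actual code.

package example

import (
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	"go.uber.org/zap/zaptest/observer"
)

// TestLogsEventuallyObserved (hypothetical) polls the log observer instead of
// sleeping or asserting immediately after the WaitGroup completes.
func TestLogsEventuallyObserved(t *testing.T) {
	core, observedLogs := observer.New(zapcore.InfoLevel)
	logger := zap.New(core)

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			// Release the WaitGroup before logging to mimic an ordering where
			// wg.Wait() can return before the log entry reaches the observer.
			wg.Done()
			logger.Info("shutdown complete")
		}()
	}
	wg.Wait()

	// Poll until the observer has received at least one entry rather than
	// relying on a fixed sleep.
	var allLogs []observer.LoggedEntry
	assert.Eventually(t, func() bool {
		allLogs = observedLogs.All()
		return len(allLogs) > 0
	}, time.Second, 10*time.Millisecond)

	require.NotEmpty(t, allLogs)
}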
29 changes: 18 additions & 11 deletions processor/spanmetricsprocessor/processor_test.go
@@ -174,8 +174,15 @@ func TestProcessorConcurrentShutdown(t *testing.T) {
}()
}
wg.Wait()
allLogs := observedLogs.All()
require.NotEmpty(t, allLogs)

// Allow time for log observer to sync all logs emitted.
// Even though the WaitGroup has been given the "done" signal, there's still a potential race condition
// between the WaitGroup being unblocked and when the logs will be flushed.
var allLogs []observer.LoggedEntry
assert.Eventually(t, func() bool {
allLogs = observedLogs.All()
return len(allLogs) > 0
}, time.Second, time.Millisecond*10)

// Starting spanmetricsprocessor...
// Started spanmetricsprocessor...
@@ -288,13 +295,13 @@ func TestProcessorConsumeMetricsErrors(t *testing.T) {
wg.Wait()

// Allow time for log observer to sync all logs emitted.
// Unfortunately, we can't tell the log observer to wait until all logs have been synced/received.
// Core/Logger.Sync() does not appear to achieve the desired behavior of syncing observedLogs with the logger.
time.Sleep(time.Millisecond)

// Verify
allLogs := observedLogs.All()
require.NotEmpty(t, allLogs)
// Even though the WaitGroup has been given the "done" signal, there's still a potential race condition
// between the WaitGroup being unblocked and when the logs will be flushed.
var allLogs []observer.LoggedEntry
assert.Eventually(t, func() bool {
allLogs = observedLogs.All()
return len(allLogs) > 0
}, time.Second, time.Millisecond*10)

assert.Equal(t, "Failed ConsumeMetrics", allLogs[0].Message)
}
@@ -412,7 +419,7 @@ func TestMetricKeyCache(t *testing.T) {
require.NoError(t, err)
// 2 keys were cached, 1 key was evicted and cleaned after the processing
assert.Eventually(t, func() bool {
return assert.Equal(t, DimensionsCacheSize, p.metricKeyToDimensions.Len())
return p.metricKeyToDimensions.Len() == DimensionsCacheSize
}, 10*time.Second, time.Millisecond*100)

// consume another batch of traces
@@ -421,7 +428,7 @@ func TestMetricKeyCache(t *testing.T) {

// 2 keys were cached, other keys were evicted and cleaned after the processing
assert.Eventually(t, func() bool {
return assert.Equal(t, DimensionsCacheSize, p.metricKeyToDimensions.Len())
return p.metricKeyToDimensions.Len() == DimensionsCacheSize
}, 10*time.Second, time.Millisecond*100)
}
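
The TestMetricKeyCache hunks also change the Eventually predicate from assert.Equal to a plain comparison, presumably because assert.Equal records a test failure on every poll where the values do not yet match, defeating the retry. A minimal, self-contained sketch of the corrected predicate shape, with a hypothetical cache stand-in:

package example

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

const dimensionsCacheSize = 2

// cacheLike is a stand-in for the processor's metric-key cache; only Len matters here.
type cacheLike struct{ keys map[string]struct{} }

func (c *cacheLike) Len() int { return len(c.keys) }

func TestCacheSizeEventually(t *testing.T) {
	cache := &cacheLike{keys: map[string]struct{}{"a": {}, "b": {}}}

	// Return a bare boolean: using assert.Equal here would mark the test as
	// failed on any unsuccessful poll even if the size converges later.
	assert.Eventually(t, func() bool {
		return cache.Len() == dimensionsCacheSize
	}, 10*time.Second, 100*time.Millisecond)
}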
