[chore][pkg/stanza] Log matching paths (open-telemetry#27859)
This PR adds a debug log that gives visibility into the exact outcome of the
file matching configuration.

It also removes some fragile logging expectations from the batching tests. I
believe the meaningful parts of the tests remain intact.
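
As an illustration (not part of the commit), here is a minimal, self-contained
sketch of what the added structured debug entry looks like on its own. The
logger setup and the sample paths are hypothetical stand-ins for the Manager's
SugaredLogger and the matcher's output:

package main

import "go.uber.org/zap"

func main() {
	// A development logger enables debug-level output; in fileconsumer the
	// Manager embeds a *zap.SugaredLogger configured by the collector.
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	sugar := logger.Sugar()

	// Hypothetical stand-in for the matcher result computed in poll().
	matches := []string{"/var/log/app/a.log", "/var/log/app/b.log"}

	// Mirrors the line this commit adds: one debug entry listing every
	// matched path, making the outcome of the matching config visible.
	sugar.Debugw("matched files", zap.Strings("paths", matches))
}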
djaglowski committed Oct 19, 2023
1 parent 1e793c7 commit ac020a2
Showing 2 changed files with 1 addition and 65 deletions.
1 change: 1 addition & 0 deletions pkg/stanza/fileconsumer/file.go
@@ -110,6 +110,7 @@ func (m *Manager) poll(ctx context.Context) {
if err != nil {
m.Debugf("finding files: %v", err)
}
m.Debugw("matched files", zap.Strings("paths", matches))

for len(matches) > m.maxBatchFiles {
m.consume(ctx, matches[:m.maxBatchFiles])
65 changes: 0 additions & 65 deletions pkg/stanza/fileconsumer/file_test.go
@@ -17,9 +17,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/featuregate"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"go.uber.org/zap/zaptest/observer"

"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/attrs"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher"
@@ -1014,12 +1011,9 @@ func TestFileBatching(t *testing.T) {
files := 100
linesPerFile := 10
maxConcurrentFiles := 20
maxBatchFiles := maxConcurrentFiles / 2
// Explicitly setting maxBatches to ensure a value of 0 does not enforce a limit
maxBatches := 0

expectedBatches := files / maxBatchFiles // assumes no remainder

tempDir := t.TempDir()
cfg := NewConfig().includeDir(tempDir)
cfg.StartAt = "beginning"
@@ -1029,9 +1023,6 @@
operator, _ := buildTestManager(t, cfg, withEmitChan(emitCalls))
operator.persister = testutil.NewUnscopedMockPersister()

core, observedLogs := observer.New(zap.DebugLevel)
operator.SugaredLogger = zap.New(core).Sugar()

temps := make([]*os.File, 0, files)
for i := 0; i < files; i++ {
temps = append(temps, openTemp(t, tempDir))
@@ -1054,23 +1045,6 @@
actualTokens = append(actualTokens, waitForNTokens(t, emitCalls, len(expectedTokens))...)
require.ElementsMatch(t, expectedTokens, actualTokens)

// During the first poll, we expect one log per batch and one log per file
require.Equal(t, files+expectedBatches, observedLogs.Len())
logNum := 0
for b := 0; b < expectedBatches; b++ {
log := observedLogs.All()[logNum]
require.Equal(t, "Consuming files", log.Message)
require.Equal(t, zapcore.DebugLevel, log.Level)
logNum++

for f := 0; f < maxBatchFiles; f++ {
log = observedLogs.All()[logNum]
require.Equal(t, "Started watching file", log.Message)
require.Equal(t, zapcore.InfoLevel, log.Level)
logNum++
}
}

// Write more logs to each file so we can validate that all files are still known
expectedTokens = make([][]byte, 0, files*linesPerFile)
for i, temp := range temps {
@@ -1087,15 +1061,6 @@
actualTokens = make([][]byte, 0, files*linesPerFile)
actualTokens = append(actualTokens, waitForNTokens(t, emitCalls, len(expectedTokens))...)
require.ElementsMatch(t, expectedTokens, actualTokens)

// During the second poll, we only expect one log per batch
require.Equal(t, files+expectedBatches*2, observedLogs.Len())
for b := logNum; b < observedLogs.Len(); b++ {
log := observedLogs.All()[logNum]
require.Equal(t, "Consuming files", log.Message)
require.Equal(t, zapcore.DebugLevel, log.Level)
logNum++
}
}

func TestFileBatchingRespectsStartAtEnd(t *testing.T) {
@@ -1454,7 +1419,6 @@ func TestMaxBatching(t *testing.T) {
maxBatchFiles := maxConcurrentFiles / 2
maxBatches := 2

expectedBatches := maxBatches
expectedMaxFilesPerPoll := maxBatches * maxBatchFiles

tempDir := t.TempDir()
@@ -1466,9 +1430,6 @@
operator, _ := buildTestManager(t, cfg, withEmitChan(emitCalls))
operator.persister = testutil.NewUnscopedMockPersister()

core, observedLogs := observer.New(zap.DebugLevel)
operator.SugaredLogger = zap.New(core).Sugar()

temps := make([]*os.File, 0, files)
for i := 0; i < files; i++ {
temps = append(temps, openTemp(t, tempDir))
@@ -1490,23 +1451,6 @@
actualTokens = append(actualTokens, waitForNTokens(t, emitCalls, numExpectedTokens)...)
require.Len(t, actualTokens, numExpectedTokens)

// During the first poll, we expect one log per batch and one log per file
require.Equal(t, expectedMaxFilesPerPoll+expectedBatches, observedLogs.Len())
logNum := 0
for b := 0; b < expectedBatches; b++ {
log := observedLogs.All()[logNum]
require.Equal(t, "Consuming files", log.Message)
require.Equal(t, zapcore.DebugLevel, log.Level)
logNum++

for f := 0; f < maxBatchFiles; f++ {
log = observedLogs.All()[logNum]
require.Equal(t, "Started watching file", log.Message)
require.Equal(t, zapcore.InfoLevel, log.Level)
logNum++
}
}

// Write more logs to each file so we can validate that all files are still known
for i, temp := range temps {
for j := 0; j < linesPerFile; j++ {
@@ -1521,15 +1465,6 @@
actualTokens = make([][]byte, 0, numExpectedTokens)
actualTokens = append(actualTokens, waitForNTokens(t, emitCalls, numExpectedTokens)...)
require.Len(t, actualTokens, numExpectedTokens)

// During the second poll, we only expect one log per batch
require.Equal(t, expectedMaxFilesPerPoll+expectedBatches*2, observedLogs.Len())
for b := logNum; b < observedLogs.Len(); b++ {
log := observedLogs.All()[logNum]
require.Equal(t, "Consuming files", log.Message)
require.Equal(t, zapcore.DebugLevel, log.Level)
logNum++
}
}

// TestReadExistingLogsWithHeader tests that, when starting from beginning, we
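
For context on why the removed expectations were fragile: the deleted test
code captured logs with zap's zaptest/observer package and then pinned exact
counts, ordering, messages, and levels. A minimal standalone sketch of that
pattern (the sample messages are stand-ins) shows how any added or reordered
log line breaks such assertions:

package main

import (
	"fmt"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	"go.uber.org/zap/zaptest/observer"
)

func main() {
	// The observer core records every entry at debug level and above,
	// much as the deleted tests wired it into the operator's logger.
	core, observedLogs := observer.New(zap.DebugLevel)
	sugar := zap.New(core).Sugar()

	sugar.Debugw("Consuming files")
	sugar.Infow("Started watching file")

	// The deleted assertions indexed into observedLogs.All() and required
	// exact messages and levels, so any new log entry invalidated them.
	for _, entry := range observedLogs.All() {
		fmt.Println(entry.Level == zapcore.DebugLevel, entry.Message)
	}
}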
