forked from open-telemetry/opentelemetry-collector-contrib
-
Notifications
You must be signed in to change notification settings - Fork 0
/
buffer.go
197 lines (176 loc) · 5.67 KB
/
buffer.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package splunkhecexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splunkhecexporter"
import (
"bytes"
"compress/gzip"
"errors"
"io"
)
// errOverCapacity signals that a write would grow the buffer past its
// configured maximum length.
var errOverCapacity = errors.New("over capacity")

// Minimum number of bytes to compress. 1500 is the MTU of an ethernet frame.
const minCompressionLen = 1500
// bufferState encapsulates intermediate buffer state when pushing data.
// It accumulates JSON-encoded events in buf, transparently switching the
// writer from a plain byte writer to a gzip writer once the payload grows
// (see accept), and tracks iteration indices into the telemetry lists.
type bufferState struct {
	compressionAvailable bool // when true, accept may switch to gzip past minCompressionLen
	bufferMaxLen         uint // max buffer length in bytes; 0 disables the capacity limit
	maxEventLength       uint // limit compared against rawLength+len(data) in accept
	writer               io.Writer // *cancellableBytesWriter or *cancellableGzipWriter
	buf                  *bytes.Buffer // backing storage for the (possibly compressed) payload
	resource int // index in ResourceLogs/ResourceMetrics/ResourceSpans list
	library  int // index in ScopeLogs/ScopeMetrics/ScopeSpans list
	record   int // index in Logs/Metrics/Spans list
	rawLength int // total uncompressed bytes accepted so far
}
// compressionEnabled reports whether the state has switched over to the
// gzip-backed writer.
func (b *bufferState) compressionEnabled() bool {
	switch b.writer.(type) {
	case *cancellableGzipWriter:
		return true
	default:
		return false
	}
}
// containsData reports whether any event bytes have been accepted into the
// buffer since the last reset.
func (b *bufferState) containsData() bool {
	return b.rawLength != 0
}
// reset discards all buffered bytes and reinstalls a plain (uncompressed)
// capacity-limited writer so the state can be reused for the next batch.
func (b *bufferState) reset() {
	b.rawLength = 0
	b.buf.Reset()
	b.writer = &cancellableBytesWriter{
		innerWriter: b.buf,
		maxCapacity: b.bufferMaxLen,
	}
}
// Read drains the buffered (possibly compressed) payload, delegating
// directly to the underlying bytes.Buffer. Together with Close this lets a
// *bufferState be consumed as an io.Reader when building the HTTP request.
func (b *bufferState) Read(p []byte) (n int, err error) {
	return b.buf.Read(p)
}
// Close finalizes the gzip stream when compression is active, flushing any
// remaining compressed bytes and the gzip footer into the buffer. It is a
// no-op when the plain byte writer is in use.
func (b *bufferState) Close() error {
	// Single comma-ok assertion instead of asserting the same type twice
	// (flagged by staticcheck S1034).
	if gzipWriter, ok := b.writer.(*cancellableGzipWriter); ok {
		return gzipWriter.close()
	}
	return nil
}
// accept returns true if data is accepted by the buffer.
//
// It returns (true, nil) when data was written, (false, nil) when data was
// rejected because it would exceed maxEventLength or the buffer capacity,
// and (false, err) on a genuine I/O error. When compression is available
// and the accumulated payload grows past minCompressionLen, the buffer
// transparently switches to a gzip writer and re-compresses the bytes
// buffered so far.
func (b *bufferState) accept(data []byte) (bool, error) {
	// Reject data that would push the cumulative raw size past the limit.
	if len(data)+b.rawLength > int(b.maxEventLength) {
		return false, nil
	}
	_, err := b.writer.Write(data)
	overCapacity := errors.Is(err, errOverCapacity)
	bufLen := b.buf.Len()
	if overCapacity {
		// The write was refused, so count the bytes as if they had landed
		// when deciding whether switching to compression could help.
		bufLen += len(data)
	}
	if b.compressionAvailable && !b.compressionEnabled() && bufLen > minCompressionLen {
		// switch over to a zip buffer.
		tmpBuf := bytes.NewBuffer(make([]byte, 0, b.bufferMaxLen+bufCapPadding))
		writer := gzip.NewWriter(tmpBuf)
		writer.Reset(tmpBuf)
		zipWriter := &cancellableGzipWriter{
			innerBuffer: tmpBuf,
			innerWriter: writer,
			// 8 bytes required for the zip footer.
			maxCapacity: b.bufferMaxLen - 8,
		}
		// bufferMaxLen == 0 means "unlimited"; restore that meaning since
		// the uint subtraction above would otherwise have wrapped around.
		if b.bufferMaxLen == 0 {
			zipWriter.maxCapacity = 0
		}
		// we write the bytes buffer into the zip buffer. Any error from this is I/O, and should stop the process.
		if _, err2 := zipWriter.Write(b.buf.Bytes()); err2 != nil {
			return false, err2
		}
		b.writer = zipWriter
		b.buf = tmpBuf
		// if the byte writer was over capacity, try to write the new entry in the zip writer:
		if overCapacity {
			if _, err2 := zipWriter.Write(data); err2 != nil {
				overCapacity2 := errors.Is(err2, errOverCapacity)
				if overCapacity2 {
					// Even compressed the entry does not fit; reject it.
					return false, nil
				}
				return false, err2
			}
		}
		b.rawLength += len(data)
		return true, nil
	}
	if overCapacity {
		return false, nil
	}
	b.rawLength += len(data)
	// NOTE(review): err here is any non-capacity write error; the data is
	// still reported as accepted in that case — confirm this is intended.
	return true, err
}
// cancellableBytesWriter writes into a bytes.Buffer but refuses (with
// errOverCapacity) any write that would grow the buffer past maxCapacity.
type cancellableBytesWriter struct {
	innerWriter *bytes.Buffer // destination buffer
	maxCapacity uint          // max buffer length in bytes; 0 disables the limit
}
// Write appends b to the inner buffer, returning errOverCapacity when the
// resulting length would exceed maxCapacity. A zero maxCapacity disables
// the capacity check entirely.
func (c *cancellableBytesWriter) Write(b []byte) (int, error) {
	limited := c.maxCapacity != 0
	if limited && c.innerWriter.Len()+len(b) > int(c.maxCapacity) {
		return 0, errOverCapacity
	}
	return c.innerWriter.Write(b)
}
// cancellableGzipWriter gzip-compresses writes into innerBuffer and refuses
// (with errOverCapacity) writes whose compressed result would exceed
// maxCapacity.
type cancellableGzipWriter struct {
	innerBuffer *bytes.Buffer // destination holding the compressed bytes
	innerWriter *gzip.Writer  // gzip stream writing into innerBuffer
	maxCapacity uint          // compressed-size budget in bytes; 0 disables the limit
	len         int           // total uncompressed bytes submitted so far
}
// Write compresses b into the inner gzip stream, returning errOverCapacity
// when the compressed output would exceed maxCapacity. A zero maxCapacity
// disables the capacity check entirely.
func (c *cancellableGzipWriter) Write(b []byte) (int, error) {
	if c.maxCapacity == 0 {
		return c.innerWriter.Write(b)
	}
	c.len += len(b)
	// if we see that at a 50% compression rate, we'd be over max capacity, start flushing.
	if (c.len / 2) > int(c.maxCapacity) {
		// we flush so the length of the underlying buffer is accurate.
		if err := c.innerWriter.Flush(); err != nil {
			return 0, err
		}
	}
	// we find that the new content uncompressed, added to our buffer, would overflow our max capacity.
	if c.innerBuffer.Len()+len(b) > int(c.maxCapacity) {
		// so we create a copy of our content and add this new data, compressed, to check that it fits.
		copyBuf := bytes.NewBuffer(make([]byte, 0, c.maxCapacity+bufCapPadding))
		copyBuf.Write(c.innerBuffer.Bytes())
		// gzip.NewWriter already targets copyBuf; the redundant
		// Reset(copyBuf) call that used to follow was a no-op and removed.
		writerCopy := gzip.NewWriter(copyBuf)
		if _, err := writerCopy.Write(b); err != nil {
			return 0, err
		}
		if err := writerCopy.Flush(); err != nil {
			return 0, err
		}
		// we find that even compressed, the data overflows.
		if copyBuf.Len() > int(c.maxCapacity) {
			return 0, errOverCapacity
		}
	}
	return c.innerWriter.Write(b)
}
// close finalizes the gzip stream, flushing any buffered compressed data
// and writing the gzip footer into the inner buffer.
func (c *cancellableGzipWriter) close() error {
	return c.innerWriter.Close()
}
// makeBlankBufferState constructs an empty bufferState backed by a plain
// (uncompressed) capacity-limited writer.
func makeBlankBufferState(bufCap uint, compressionAvailable bool, maxEventLength uint) *bufferState {
	// Buffer of JSON encoded Splunk events; the last record is expected to
	// overflow bufCap, hence the padding on the backing slice.
	backing := bytes.NewBuffer(make([]byte, 0, bufCap+bufCapPadding))
	// The resource/library/record indices start at their zero values.
	return &bufferState{
		compressionAvailable: compressionAvailable,
		bufferMaxLen:         bufCap,
		maxEventLength:       maxEventLength,
		buf:                  backing,
		writer: &cancellableBytesWriter{
			innerWriter: backing,
			maxCapacity: bufCap,
		},
	}
}