@@ -15,31 +15,26 @@ import (
 	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/sender"
 )
 
-type batch struct {
-	ctx  context.Context
-	req  request.Request
-	done multiDone
-}
-
 type batcherSettings[K any] struct {
-	sizerType  request.SizerType
-	sizer      request.Sizer[K]
-	next       sender.SendFunc[K]
-	maxWorkers int
+	sizerType   request.SizerType
+	sizer       request.Sizer[K]
+	partitioner Partitioner[K]
+	next        sender.SendFunc[K]
+	maxWorkers  int
 }
 
 // defaultBatcher continuously batch incoming requests and flushes asynchronously if minimum size limit is met or on timeout.
 type defaultBatcher struct {
-	cfg            BatchConfig
-	workerPool     chan struct{}
-	sizerType      request.SizerType
-	sizer          request.Sizer[request.Request]
-	consumeFunc    sender.SendFunc[request.Request]
-	stopWG         sync.WaitGroup
-	currentBatchMu sync.Mutex
-	currentBatch   *batch
-	timer          *time.Timer
-	shutdownCh     chan struct{}
+	cfg         BatchConfig
+	workerPool  chan struct{}
+	sizerType   request.SizerType
+	sizer       request.Sizer[request.Request]
+	consumeFunc sender.SendFunc[request.Request]
+	stopWG      sync.WaitGroup
+	ticker      *time.Ticker
+	shutdownCh  chan struct{}
+
+	partitionManager partitionManager
 }
 
 func newDefaultBatcher(bCfg BatchConfig, bSet batcherSettings[request.Request]) *defaultBatcher {
@@ -52,30 +47,26 @@ func newDefaultBatcher(bCfg BatchConfig, bSet batcherSettings[request.Request])
 		}
 	}
 	return &defaultBatcher{
-		cfg:         bCfg,
-		workerPool:  workerPool,
-		sizerType:   bSet.sizerType,
-		sizer:       bSet.sizer,
-		consumeFunc: bSet.next,
-		stopWG:      sync.WaitGroup{},
-		shutdownCh:  make(chan struct{}, 1),
-	}
-}
-
-func (qb *defaultBatcher) resetTimer() {
-	if qb.cfg.FlushTimeout > 0 {
-		qb.timer.Reset(qb.cfg.FlushTimeout)
+		cfg:              bCfg,
+		workerPool:       workerPool,
+		sizerType:        bSet.sizerType,
+		sizer:            bSet.sizer,
+		consumeFunc:      bSet.next,
+		stopWG:           sync.WaitGroup{},
+		shutdownCh:       make(chan struct{}, 1),
+		partitionManager: newPartitionManager(bSet.partitioner),
 	}
 }
 
 func (qb *defaultBatcher) Consume(ctx context.Context, req request.Request, done Done) {
-	qb.currentBatchMu.Lock()
+	shard := qb.partitionManager.getShard(ctx, req)
+	shard.Lock()
 
-	if qb.currentBatch == nil {
+	if shard.batch == nil {
 		reqList, mergeSplitErr := req.MergeSplit(ctx, int(qb.cfg.MaxSize), qb.sizerType, nil)
 		if mergeSplitErr != nil || len(reqList) == 0 {
 			done.OnDone(mergeSplitErr)
-			qb.currentBatchMu.Unlock()
+			shard.Unlock()
 			return
 		}
 
@@ -90,27 +81,27 @@ func (qb *defaultBatcher) Consume(ctx context.Context, req request.Request, done
 		if qb.sizer.Sizeof(lastReq) < qb.cfg.MinSize {
 			// Do not flush the last item and add it to the current batch.
 			reqList = reqList[:len(reqList)-1]
-			qb.currentBatch = &batch{
-				ctx:  ctx,
-				req:  lastReq,
-				done: multiDone{done},
+			shard.batch = &batch{
+				ctx:     ctx,
+				req:     lastReq,
+				done:    multiDone{done},
+				created: time.Now(),
 			}
-			qb.resetTimer()
 		}
 
-		qb.currentBatchMu.Unlock()
+		shard.Unlock()
 		for i := 0; i < len(reqList); i++ {
 			qb.flush(ctx, reqList[i], done)
 		}
 
 		return
 	}
 
-	reqList, mergeSplitErr := qb.currentBatch.req.MergeSplit(ctx, int(qb.cfg.MaxSize), qb.sizerType, req)
+	reqList, mergeSplitErr := shard.batch.req.MergeSplit(ctx, int(qb.cfg.MaxSize), qb.sizerType, req)
 	// If failed to merge signal all Done callbacks from current batch as well as the current request and reset the current batch.
 	if mergeSplitErr != nil || len(reqList) == 0 {
 		done.OnDone(mergeSplitErr)
-		qb.currentBatchMu.Unlock()
+		shard.Unlock()
 		return
 	}
 
@@ -126,15 +117,15 @@ func (qb *defaultBatcher) Consume(ctx context.Context, req request.Request, done
 
 	// Logic on how to deal with the current batch:
 	// TODO: Deal with merging Context.
-	qb.currentBatch.req = reqList[0]
-	qb.currentBatch.done = append(qb.currentBatch.done, done)
+	shard.req = reqList[0]
+	shard.done = append(shard.done, done)
 	// Save the "currentBatch" if we need to flush it, because we want to execute flush without holding the lock, and
 	// cannot unlock and re-lock because we are not done processing all the responses.
 	var firstBatch *batch
 	// Need to check the currentBatch if more than 1 result returned or if 1 result return but larger than MinSize.
-	if len(reqList) > 1 || qb.sizer.Sizeof(qb.currentBatch.req) >= qb.cfg.MinSize {
-		firstBatch = qb.currentBatch
-		qb.currentBatch = nil
+	if len(reqList) > 1 || qb.sizer.Sizeof(shard.batch.req) >= qb.cfg.MinSize {
+		firstBatch = shard.batch
+		shard.batch = nil
 	}
 	// At this moment we dealt with the first result which is either in the currentBatch or in the `firstBatch` we will flush.
 	reqList = reqList[1:]
@@ -145,16 +136,16 @@ func (qb *defaultBatcher) Consume(ctx context.Context, req request.Request, done
 		if qb.sizer.Sizeof(lastReq) < qb.cfg.MinSize {
 			// Do not flush the last item and add it to the current batch.
 			reqList = reqList[:len(reqList)-1]
-			qb.currentBatch = &batch{
-				ctx:  ctx,
-				req:  lastReq,
-				done: multiDone{done},
+			shard.batch = &batch{
+				ctx:     ctx,
+				req:     lastReq,
+				done:    multiDone{done},
+				created: time.Now(),
 			}
-			qb.resetTimer()
 		}
 	}
 
-	qb.currentBatchMu.Unlock()
+	shard.Unlock()
 	if firstBatch != nil {
 		qb.flush(firstBatch.ctx, firstBatch.req, firstBatch.done)
 	}
@@ -172,8 +163,8 @@ func (qb *defaultBatcher) startTimeBasedFlushingGoroutine() {
 			select {
 			case <-qb.shutdownCh:
 				return
-			case <-qb.timer.C:
-				qb.flushCurrentBatchIfNecessary()
+			case <-qb.ticker.C:
+				qb.flushCurrentBatchIfNecessary(false)
 			}
 		}
 	}()
@@ -182,27 +173,32 @@ func (qb *defaultBatcher) startTimeBasedFlushingGoroutine() {
 // Start starts the goroutine that reads from the queue and flushes asynchronously.
 func (qb *defaultBatcher) Start(_ context.Context, _ component.Host) error {
 	if qb.cfg.FlushTimeout > 0 {
-		qb.timer = time.NewTimer(qb.cfg.FlushTimeout)
+		qb.ticker = time.NewTicker(qb.cfg.FlushTimeout)
 		qb.startTimeBasedFlushingGoroutine()
 	}
 
 	return nil
 }
 
 // flushCurrentBatchIfNecessary sends out the current request batch if it is not nil
-func (qb *defaultBatcher) flushCurrentBatchIfNecessary() {
-	qb.currentBatchMu.Lock()
-	if qb.currentBatch == nil {
-		qb.currentBatchMu.Unlock()
-		return
-	}
-	batchToFlush := qb.currentBatch
-	qb.currentBatch = nil
-	qb.currentBatchMu.Unlock()
+func (qb *defaultBatcher) flushCurrentBatchIfNecessary(forceFlush bool) {
+	qb.partitionManager.forEachShard(func(shard *shard) {
+		shard.Lock()
+		if shard.batch == nil {
+			shard.Unlock()
+			return
+		}
+		if !forceFlush && time.Since(shard.created) < qb.cfg.FlushTimeout {
+			shard.Unlock()
+			return
+		}
+		batchToFlush := shard.batch
+		shard.batch = nil
+		shard.Unlock()
 
-	// flush() blocks until successfully started a goroutine for flushing.
-	qb.flush(batchToFlush.ctx, batchToFlush.req, batchToFlush.done)
-	qb.resetTimer()
+		// flush() blocks until successfully started a goroutine for flushing.
+		qb.flush(batchToFlush.ctx, batchToFlush.req, batchToFlush.done)
+	})
 }
 
 // flush starts a goroutine that calls consumeFunc. It blocks until a worker is available if necessary.
@@ -224,7 +220,7 @@ func (qb *defaultBatcher) flush(ctx context.Context, req request.Request, done D
 func (qb *defaultBatcher) Shutdown(_ context.Context) error {
 	close(qb.shutdownCh)
 	// Make sure execute one last flush if necessary.
-	qb.flushCurrentBatchIfNecessary()
+	qb.flushCurrentBatchIfNecessary(true)
 	qb.stopWG.Wait()
 	return nil
 }
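
Note on types referenced but not shown in this diff: Partitioner, newPartitionManager, partitionManager, and shard are introduced elsewhere in the change, and the batch struct removed from this file in the first hunk is presumably redefined next to them with the new created field. The sketch below is only an inference from the call sites above (the getShard and forEachShard methods, shard.Lock, and the promoted accesses shard.req, shard.done, and shard.created); the field layout and the singlePartitionManager name are assumptions, not the code actually merged in the package.

// Minimal sketch, assuming it lives in the same package as default_batcher.go
// and reuses its existing imports (context, sync, time, request).

// batch carries one in-progress request, the callbacks to signal on completion,
// and, in the new design, the time it was created so a shared ticker can decide
// when FlushTimeout has elapsed for this particular batch.
type batch struct {
	ctx     context.Context
	req     request.Request
	done    multiDone
	created time.Time
}

// shard guards one batch. Embedding *batch would explain why the diff writes
// shard.req, shard.done, and shard.created alongside shard.batch.
type shard struct {
	sync.Mutex
	*batch
}

// partitionManager routes each request to a shard and can visit every shard.
type partitionManager interface {
	getShard(ctx context.Context, req request.Request) *shard
	forEachShard(func(*shard))
}

// singlePartitionManager (hypothetical name) would be the degenerate case when
// no Partitioner is configured: all requests share one shard, matching the old
// single currentBatch behavior.
type singlePartitionManager struct {
	s shard
}

func (pm *singlePartitionManager) getShard(context.Context, request.Request) *shard {
	return &pm.s
}

func (pm *singlePartitionManager) forEachShard(f func(*shard)) {
	f(&pm.s)
}

Read together with the hunks above: the per-batcher *time.Timer and resetTimer are replaced by a single *time.Ticker, so the flushing goroutine now wakes on a fixed period, walks every shard, and flushes only batches older than FlushTimeout; Shutdown passes forceFlush=true to drain whatever remains regardless of age.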