@@ -11,7 +11,6 @@ import (
 	"go.opentelemetry.io/collector/exporter/exporterbatcher"
 	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/request"
 	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/sender"
-	"go.opentelemetry.io/collector/exporter/exporterqueue"
 	"go.opentelemetry.io/collector/pipeline"
 )

@@ -20,7 +19,7 @@ type Settings[K any] struct {
 	Signal    pipeline.Signal
 	ID        component.ID
 	Telemetry component.TelemetrySettings
-	Encoding  exporterqueue.Encoding[K]
+	Encoding  Encoding[K]
 	Sizers    map[exporterbatcher.SizerType]Sizer[K]
 }

@@ -31,20 +30,17 @@ type QueueBatch struct {

 func NewQueueBatch(
 	qSet Settings[request.Request],
-	qCfg exporterqueue.Config,
-	bCfg exporterbatcher.Config,
+	cfg Config,
 	next sender.SendFunc[request.Request],
 ) (*QueueBatch, error) {
 	var b Batcher[request.Request]
-	switch bCfg.Enabled {
-	case false:
+	switch {
+	case cfg.Batch == nil:
 		b = newDisabledBatcher[request.Request](next)
 	default:
-		b = newDefaultBatcher(bCfg, next, qCfg.NumConsumers)
-	}
-	// TODO: https://github.com/open-telemetry/opentelemetry-collector/issues/12244
-	if bCfg.Enabled {
-		qCfg.NumConsumers = 1
+		// TODO: https://github.com/open-telemetry/opentelemetry-collector/issues/12244
+		cfg.NumConsumers = 1
+		b = newDefaultBatcher(*cfg.Batch, next, cfg.NumConsumers)
 	}

 	sizer, ok := qSet.Sizers[exporterbatcher.SizerTypeRequests]
@@ -54,25 +50,25 @@ func NewQueueBatch(

 	var q Queue[request.Request]
 	switch {
-	case !qCfg.Enabled:
+	case cfg.WaitForResult:
 		q = newDisabledQueue(b.Consume)
-	case qCfg.StorageID != nil:
+	case cfg.StorageID != nil:
 		q = newAsyncQueue(newPersistentQueue[request.Request](persistentQueueSettings[request.Request]{
 			sizer:     sizer,
-			capacity:  int64(qCfg.QueueSize),
-			blocking:  qCfg.Blocking,
+			capacity:  int64(cfg.QueueSize),
+			blocking:  cfg.BlockOnOverflow,
 			signal:    qSet.Signal,
-			storageID: *qCfg.StorageID,
+			storageID: *cfg.StorageID,
 			encoding:  qSet.Encoding,
 			id:        qSet.ID,
 			telemetry: qSet.Telemetry,
-		}), qCfg.NumConsumers, b.Consume)
+		}), cfg.NumConsumers, b.Consume)
 	default:
 		q = newAsyncQueue(newMemoryQueue[request.Request](memoryQueueSettings[request.Request]{
 			sizer:    sizer,
-			capacity: int64(qCfg.QueueSize),
-			blocking: qCfg.Blocking,
-		}), qCfg.NumConsumers, b.Consume)
+			capacity: int64(cfg.QueueSize),
+			blocking: cfg.BlockOnOverflow,
+		}), cfg.NumConsumers, b.Consume)
 	}

 	oq, err := newObsQueue(qSet, q)
@@ -85,11 +81,13 @@ func NewQueueBatch(

 // Start is invoked during service startup.
 func (qs *QueueBatch) Start(ctx context.Context, host component.Host) error {
-	if err := qs.queue.Start(ctx, host); err != nil {
+	if err := qs.batcher.Start(ctx, host); err != nil {
 		return err
 	}
-
-	return qs.batcher.Start(ctx, host)
+	if err := qs.queue.Start(ctx, host); err != nil {
+		return errors.Join(err, qs.batcher.Shutdown(ctx))
+	}
+	return nil
 }

 // Shutdown is invoked during service shutdown.
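
The two former arguments (exporterqueue.Config and exporterbatcher.Config) are folded into a single Config. A rough sketch of its shape, inferred only from the cfg field accesses in the diff above, follows; the field names come from the diff, while the exact types, the BatchConfig placeholder, and the package name are assumptions, not the real exporterhelper definitions.

package queuebatch // hypothetical package name, for illustration only

import "go.opentelemetry.io/collector/component"

// BatchConfig is a placeholder for whatever *cfg.Batch dereferences to in the
// diff; the real type lives elsewhere in exporterhelper.
type BatchConfig struct{}

// Config sketches the consolidated queue/batch settings implied by the diff.
type Config struct {
	NumConsumers    int           // queue consumers; forced to 1 when batching (see the TODO in the diff)
	QueueSize       int           // queue capacity
	BlockOnOverflow bool          // replaces the old qCfg.Blocking flag
	WaitForResult   bool          // true bypasses queuing entirely (disabled queue)
	StorageID       *component.ID // non-nil selects the persistent, storage-backed queue
	Batch           *BatchConfig  // nil disables batching; non-nil enables the default batcher
}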
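
The new Start order brings the batcher (the consumer side) up before the queue starts delivering, and a failed queue start now tears the batcher back down instead of leaking it. Below is a minimal standalone sketch of that pattern, using hypothetical starter/pair/noop types rather than the collector's actual components.

package main

import (
	"context"
	"errors"
	"fmt"
)

// starter is a hypothetical stand-in for the collector's component lifecycle.
type starter interface {
	Start(ctx context.Context) error
	Shutdown(ctx context.Context) error
}

// pair mirrors the QueueBatch ordering from the diff: start the batcher
// (the consumer) first, then the queue (the producer); if the queue fails
// to start, shut the batcher back down and report both errors together.
type pair struct {
	batcher starter
	queue   starter
}

func (p *pair) Start(ctx context.Context) error {
	if err := p.batcher.Start(ctx); err != nil {
		return err
	}
	if err := p.queue.Start(ctx); err != nil {
		return errors.Join(err, p.batcher.Shutdown(ctx))
	}
	return nil
}

// noop is a trivial starter used only to exercise the ordering.
type noop struct{ name string }

func (n noop) Start(context.Context) error    { fmt.Println("start", n.name); return nil }
func (n noop) Shutdown(context.Context) error { fmt.Println("shutdown", n.name); return nil }

func main() {
	p := &pair{batcher: noop{name: "batcher"}, queue: noop{name: "queue"}}
	fmt.Println("start error:", p.Start(context.Background()))
}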