Skip to content

Commit be48c0f

Browse files
[CHORE] locking targets per job
Signed-off-by: Nicolas Takashi <[email protected]>
1 parent 7437bb8 commit be48c0f

File tree

1 file changed

+16
-4
lines changed

1 file changed

+16
-4
lines changed

cmd/otel-allocator/target/discovery.go

+16-4
Original file line numberDiff line numberDiff line change
@@ -138,12 +138,16 @@ func (m *Discoverer) Run() error {
138138
return nil
139139
}
140140

141+
// UpdateTsets updates the target sets to be scraped.
141142
func (m *Discoverer) UpdateTsets(tsets map[string][]*targetgroup.Group) {
142143
m.mtxScrape.Lock()
143144
m.targetSets = tsets
144145
m.mtxScrape.Unlock()
145146
}
146147

148+
// reloader triggers a reload of the scrape configs at regular intervals.
149+
// The time between reloads is defined by reloadIntervalDuration to avoid overloading the system
150+
// with too many reloads, because some service discovery mechanisms can be quite chatty.
147151
func (m *Discoverer) reloader() {
148152
reloadIntervalDuration := model.Duration(5 * time.Second)
149153
ticker := time.NewTicker(time.Duration(reloadIntervalDuration))
@@ -165,6 +169,8 @@ func (m *Discoverer) reloader() {
165169
}
166170
}
167171

172+
// Reload triggers a reload of the scrape configs.
173+
// This will process the target groups and update the targets concurrently.
168174
func (m *Discoverer) Reload() {
169175
m.mtxScrape.Lock()
170176
var wg sync.WaitGroup
@@ -176,7 +182,12 @@ func (m *Discoverer) Reload() {
176182
wg.Add(1)
177183
// Run the sync in parallel as these take a while and at high load can't catch up.
178184
go func(jobName string, groups []*targetgroup.Group) {
179-
m.processTargetGroups(jobName, groups, targets)
185+
processedTargets := m.processTargetGroups(jobName, groups)
186+
m.mtxTargets.Lock()
187+
for k, v := range processedTargets {
188+
targets[k] = v
189+
}
190+
m.mtxTargets.Unlock()
180191
wg.Done()
181192
}(jobName, groups)
182193
}
@@ -185,9 +196,11 @@ func (m *Discoverer) Reload() {
185196
m.processTargetsCallBack(targets)
186197
}
187198

188-
func (m *Discoverer) processTargetGroups(jobName string, groups []*targetgroup.Group, targets map[string]*Item) {
199+
// processTargetGroups processes the target groups and returns a map of targets.
200+
func (m *Discoverer) processTargetGroups(jobName string, groups []*targetgroup.Group) map[string]*Item {
189201
builder := labels.NewBuilder(labels.Labels{})
190202
timer := prometheus.NewTimer(processTargetGroupsDuration.WithLabelValues(jobName))
203+
targets := map[string]*Item{}
191204
defer timer.ObserveDuration()
192205
var count float64 = 0
193206
for _, tg := range groups {
@@ -203,12 +216,11 @@ func (m *Discoverer) processTargetGroups(jobName string, groups []*targetgroup.G
203216
builder.Set(string(ln), string(lv))
204217
}
205218
item := NewItem(jobName, string(t[model.AddressLabel]), builder.Labels(), "")
206-
m.mtxTargets.Lock()
207219
targets[item.Hash()] = item
208-
m.mtxTargets.Unlock()
209220
}
210221
}
211222
targetsDiscovered.WithLabelValues(jobName).Set(count)
223+
return targets
212224
}
213225

214226
// Run receives and saves target set updates and triggers the scraping loops reloading.

0 commit comments

Comments (0)