29 changes: 14 additions & 15 deletions cmd/config-reloader/main.go
@@ -26,7 +26,6 @@ import (
"time"

"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/oklog/run"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
@@ -57,12 +56,12 @@ func main() {
flag.Parse()

logger := log.NewJSONLogger(log.NewSyncWriter(os.Stderr))
logger = log.With(logger, "ts", log.DefaultTimestampUTC)
logger = log.With(logger, "caller", log.DefaultCaller)
logger = logger.With("ts", log.DefaultTimestampUTC)
logger = logger.With("caller", log.DefaultCaller)

if *configDirOutput != "" && *configDir == "" {
//nolint:errcheck
level.Error(logger).Log("msg", "config-dir-output specified without config-dir")
logger.Error("config-dir-output specified without config-dir")
os.Exit(1)
}

@@ -75,7 +74,7 @@ func main() {
reloadURL, err := url.Parse(*reloadURLStr)
if err != nil {
//nolint:errcheck
level.Error(logger).Log("msg", "parsing reloader URL failed", "err", err)
logger.Error("parsing reloader URL failed", "err", err)
os.Exit(1)
}

@@ -87,7 +86,7 @@ func main() {
req, err := http.NewRequest(http.MethodGet, *readyURLStr, nil)
if err != nil {
//nolint:errcheck
level.Error(logger).Log("msg", "creating request", "err", err)
logger.Error("creating request", "err", err)
os.Exit(1)
}

@@ -99,31 +98,31 @@

go func() {
//nolint:errcheck
level.Info(logger).Log("msg", "ensure ready-url is healthy")
logger.Info("ensure ready-url is healthy")
for {
select {
case <-term:
//nolint:errcheck
level.Info(logger).Log("msg", "received SIGTERM, exiting gracefully...")
logger.Info("received SIGTERM, exiting gracefully...")
os.Exit(0)
case <-ticker.C:
resp, err := http.DefaultClient.Do(req)
if err != nil {
if acceptableNoConnectionErrors <= 0 {
//nolint:errcheck
level.Error(logger).Log("msg", "polling ready-url", "err", err, "no-connection-threshold", *readyProbingNoConnectionThreshold)
logger.Error("polling ready-url", "err", err, "no-connection-threshold", *readyProbingNoConnectionThreshold)
os.Exit(1)
}
acceptableNoConnectionErrors--
continue
}
if err := resp.Body.Close(); err != nil {
//nolint:errcheck
level.Warn(logger).Log("msg", "unable to close response body", "err", err)
logger.Warn("unable to close response body", "err", err)
}
if resp.StatusCode == http.StatusOK {
//nolint:errcheck
level.Info(logger).Log("msg", "ready-url is healthy")
logger.Info("ready-url is healthy")
ticker.Stop()
done <- true
return
@@ -176,7 +175,7 @@ func main() {
select {
case <-term:
//nolint:errcheck
level.Info(logger).Log("msg", "received SIGTERM, exiting gracefully...")
logger.Info("received SIGTERM, exiting gracefully...")
case <-cancel:
}
return nil
@@ -192,21 +191,21 @@ func main() {

g.Add(func() error {
//nolint:errcheck
level.Info(logger).Log("msg", "Starting web server for metrics", "listen", *listenAddress)
logger.Info("Starting web server for metrics", "listen", *listenAddress)
return server.ListenAndServe()
}, func(error) {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
if err := server.Shutdown(ctx); err != nil {
//nolint:errcheck
level.Error(logger).Log("msg", "Server failed to shut down gracefully.")
logger.Error("Server failed to shut down gracefully.")
}
cancel()
})
}

if err := g.Run(); err != nil {
//nolint:errcheck
level.Error(logger).Log("msg", "running reloader failed", "err", err)
logger.Error("running reloader failed", "err", err)
os.Exit(1)
}
}
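The pattern above repeats across the file: go-kit's `level` wrappers around a bare `Log` call are swapped for leveled methods called directly on the logger, which also lets the `//nolint:errcheck` directives go away. A minimal side-by-side sketch of the two styles, using the standard library's `log/slog` for the new form (the PR's actual logger construction may differ):

```go
package main

import (
	"log/slog"
	"os"

	gokitlog "github.com/go-kit/log"
	"github.com/go-kit/log/level"
)

func main() {
	// Old style: a go-kit logger wrapped by level helpers. Log returns an
	// error, hence the //nolint:errcheck directives in the original code.
	gk := gokitlog.NewJSONLogger(gokitlog.NewSyncWriter(os.Stderr))
	gk = gokitlog.With(gk, "ts", gokitlog.DefaultTimestampUTC, "caller", gokitlog.DefaultCaller)
	//nolint:errcheck
	level.Error(gk).Log("msg", "parsing reloader URL failed", "err", "bad url")

	// New style: leveled methods take the message first, then key/value
	// pairs, and return nothing, so no error needs to be ignored.
	sl := slog.New(slog.NewJSONHandler(os.Stderr, nil))
	sl.Error("parsing reloader URL failed", "err", "bad url")
}
```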
44 changes: 15 additions & 29 deletions cmd/datasource-syncer/main.go
@@ -21,17 +21,17 @@ import (
"errors"
"flag"
"fmt"
"log/slog"
"net/http"
"os"
"strings"

"github.com/go-kit/log"
"github.com/go-kit/log/level"
grafana "github.com/grafana/grafana-api-golang-client"
"github.com/hashicorp/go-cleanhttp"
"golang.org/x/mod/semver"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"github.com/prometheus/common/promslog"
)

var (
@@ -60,41 +60,34 @@
func main() {
flag.Parse()

logger := log.NewJSONLogger(log.NewSyncWriter(os.Stderr))
logger = log.With(logger, "ts", log.DefaultTimestampUTC)
logger = log.With(logger, "caller", log.DefaultCaller)
logger := promslog.New(slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{AddSource: true}))

if len(*datasourceUIDList) == 0 {
//nolint:errcheck
level.Error(logger).Log("msg", "--datasource-uid must be set")
logger.Error("--datasource-uid must be set")
os.Exit(1)
}

if *grafanaAPIToken == "" {
envToken := os.Getenv("GRAFANA_SERVICE_ACCOUNT_TOKEN")
if envToken == "" {
//nolint:errcheck
level.Error(logger).Log("msg", "--grafana-api-token or the environment variable GRAFANA_SERVICE_ACCOUNT_TOKEN must be set")
logger.Error("--grafana-api-token or the environment variable GRAFANA_SERVICE_ACCOUNT_TOKEN must be set")
os.Exit(1)
}
grafanaAPIToken = &envToken
}
if *grafanaEndpoint == "" {
//nolint:errcheck
level.Error(logger).Log("msg", "--grafana-api-endpoint must be set")
logger.Error("--grafana-api-endpoint must be set")
os.Exit(1)
}

if *projectID == "" {
//nolint:errcheck
level.Error(logger).Log("msg", "--project-id must be set")
logger.Error("--project-id must be set")
os.Exit(1)
}

client, err := getTLSClient(*certFile, *keyFile, *caFile, *insecureSkipVerify)
if err != nil {
//nolint:errcheck
level.Error(logger).Log("msg", "couldn't create client", "err", err)
logger.Error("couldn't create client", "err", err)
os.Exit(1)
}

@@ -103,15 +96,13 @@ func main() {
Client: client,
})
if err != nil {
//nolint:errcheck
level.Error(logger).Log("msg", "couldn't create grafana client", "err", err)
logger.Error("couldn't create grafana client", "err", err)
os.Exit(1)
}

token, err := getOAuth2Token(*credentialsFile)
if err != nil {
//nolint:errcheck
level.Error(logger).Log("msg", "couldn't get Google OAuth2 token", "err", err)
logger.Error("couldn't get Google OAuth2 token", "err", err)
os.Exit(1)
}

@@ -127,35 +118,30 @@ func main() {
dataSource, err := grafanaClient.DataSourceByUID(datasourceUID)
if err != nil {
dsErrors = append(dsErrors, datasourceUID)
//nolint:errcheck
level.Error(logger).Log("msg", fmt.Sprintf("error fetching data source config of data source uid: %s", datasourceUID), "err", err)
logger.Error(fmt.Sprintf("error fetching data source config of data source uid: %s", datasourceUID), "err", err)
continue
}

dataSource, err = buildUpdateDataSourceRequest(*dataSource, token)
if err != nil {
dsErrors = append(dsErrors, datasourceUID)
//nolint:errcheck
level.Error(logger).Log("msg", fmt.Sprintf("couldn't build data source update request for data source uid: %s", datasourceUID), "err", err)
logger.Error(fmt.Sprintf("couldn't build data source update request for data source uid: %s", datasourceUID), "err", err)
continue
}

err = grafanaClient.UpdateDataSourceByUID(dataSource)
if err != nil {
dsErrors = append(dsErrors, datasourceUID)
//nolint:errcheck
level.Error(logger).Log("msg", fmt.Sprintf("couldn't send update data source request to data source id: %s", datasourceUID), "err", err)
logger.Error(fmt.Sprintf("couldn't send update data source request to data source id: %s", datasourceUID), "err", err)
continue
}
dsSuccessfullyUpdated = append(dsSuccessfullyUpdated, datasourceUID)
}
if len(dsSuccessfullyUpdated) != 0 {
//nolint:errcheck
level.Info(logger).Log("msg", fmt.Sprintf("Updated Grafana data source uids: %s", dsSuccessfullyUpdated))
logger.Info(fmt.Sprintf("Updated Grafana data source uids: %s", dsSuccessfullyUpdated))
}
if len(dsErrors) != 0 {
//nolint:errcheck
level.Error(logger).Log("msg", fmt.Sprintf("Failed to update Grafana data source uids: %s", dsErrors))
logger.Error(fmt.Sprintf("Failed to update Grafana data source uids: %s", dsErrors))
@SuperQ commented on Mar 7, 2025:

I think this can be fixed to eliminate the fmt.Sprintf().

Suggested change
logger.Error(fmt.Sprintf("Failed to update Grafana data source uids: %s", dsErrors))
logger.Error("Failed to update Grafana data source uids", "error", dsErrors)

os.Exit(1)
}
}
15 changes: 7 additions & 8 deletions cmd/frontend/internal/rule/proxy.go
@@ -18,13 +18,12 @@ import (
"context"
"errors"
"fmt"
"log/slog"
"net/http"
"net/url"
"sync"

"github.com/GoogleCloudPlatform/prometheus-engine/internal/promapi"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
promapiv1 "github.com/prometheus/prometheus/web/api/v1"
)

@@ -40,13 +39,13 @@ type retriever interface {
// Results are un-sorted and concatenated as-is. In case of errors from any endpoint,
// warning log and partial results are returned.
type Proxy struct {
logger log.Logger
logger *slog.Logger
endpoints []url.URL
client retriever
}

// NewProxy creates a new proxy.
func NewProxy(logger log.Logger, c httpClient, ruleEndpoints []url.URL) *Proxy {
func NewProxy(logger *slog.Logger, c httpClient, ruleEndpoints []url.URL) *Proxy {
return &Proxy{
logger: logger,
endpoints: ruleEndpoints,
@@ -77,13 +76,13 @@ func (p *Proxy) Alerts(w http.ResponseWriter, req *http.Request) {
// fanoutForward calls the endpoints in parallel and returns the combined results.
func fanoutForward[T *promapiv1.Alert | *promapiv1.RuleGroup](
ctx context.Context,
logger log.Logger,
logger *slog.Logger,
ruleEndpoints []url.URL,
rawQuery string,
retrieveFn func(context.Context, url.URL, string) ([]T, error),
) ([]T, error) {
if len(ruleEndpoints) == 0 {
_ = level.Warn(logger).Log("msg", "tried to fetch rules/alerts, no endpoints (--rules.target-urls) configured")
_ = logger.Warn("tried to fetch rules/alerts, no endpoints (--rules.target-urls) configured")
return []T{}, nil
}

@@ -137,10 +136,10 @@ func fanoutForward[T *promapiv1.Alert | *promapiv1.RuleGroup](

if len(errs) != 0 {
if len(errs) == len(ruleEndpoints) {
_ = level.Error(logger).Log("msg", "all endpoints failed", "errors", errs)
_ = logger.Error("all endpoints failed", "errors", errs)
return nil, errAllEndpointsFailed
}
_ = level.Warn(logger).Log("msg", "some endpoints failed; potentially partial result", "errors", errs)
_ = logger.Warn("some endpoints failed; potentially partial result", "errors", errs)
}
// TODO(bwplotka): Sort?
return results, nil
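The doc comments in this file describe the fan-out behaviour: all configured endpoints are queried in parallel, per-endpoint failures are logged and collected, and the call only errors out when every endpoint fails; otherwise the partial results are returned with a warning. A stripped-down sketch of that pattern, not the PR's actual implementation (names, the example host, and the sentinel error are placeholders):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log/slog"
	"net/url"
	"os"
	"sync"
)

var errAllEndpointsFailed = errors.New("all endpoints failed")

// fanout queries every endpoint concurrently, concatenates whatever
// succeeded, and only fails outright when every endpoint failed.
func fanout[T any](
	ctx context.Context,
	logger *slog.Logger,
	endpoints []url.URL,
	fetch func(context.Context, url.URL) ([]T, error),
) ([]T, error) {
	var (
		mu      sync.Mutex
		wg      sync.WaitGroup
		results []T
		errs    []error
	)
	for _, ep := range endpoints {
		wg.Add(1)
		go func(ep url.URL) {
			defer wg.Done()
			items, err := fetch(ctx, ep)
			mu.Lock()
			defer mu.Unlock()
			if err != nil {
				errs = append(errs, fmt.Errorf("%s: %w", ep.Host, err))
				return
			}
			results = append(results, items...)
		}(ep)
	}
	wg.Wait()

	if len(endpoints) > 0 && len(errs) == len(endpoints) {
		logger.Error("all endpoints failed", "errors", errs)
		return nil, errAllEndpointsFailed
	}
	if len(errs) > 0 {
		logger.Warn("some endpoints failed; potentially partial result", "errors", errs)
	}
	return results, nil
}

func main() {
	logger := slog.New(slog.NewJSONHandler(os.Stderr, nil))
	endpoints := []url.URL{{Scheme: "http", Host: "rule-evaluator-0:19092"}}
	groups, err := fanout(context.Background(), logger, endpoints,
		func(_ context.Context, u url.URL) ([]string, error) {
			// Stand-in for the real HTTP retrieval against u.
			return []string{"group-from-" + u.Host}, nil
		})
	fmt.Println(groups, err)
}
```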
12 changes: 6 additions & 6 deletions cmd/frontend/internal/rule/proxy_test.go
@@ -80,7 +80,7 @@ func TestProxy_handleError(t *testing.T) {
t.Parallel()

recorder := httptest.NewRecorder()
p := NewProxy(log.NewNopLogger(), nil, nil)
p := NewProxy(promslog.NewNopLogger(), nil, nil)
p.handleError(recorder, dummyRequest, tt.err)

require.Equal(t, tt.wantStatus, recorder.Code)
@@ -148,7 +148,7 @@ func TestFanoutForward_AlertsReturnSuccess(t *testing.T) {
{Scheme: "https", Host: "localhost:8081", Path: ""},
}

alerts, err := fanoutForward(t.Context(), log.NewNopLogger(), retrieverUrls, "?qkey=qval", func(ctx context.Context, u url.URL, s string) ([]*promapiv1.Alert, error) {
alerts, err := fanoutForward(t.Context(), promslog.NewNopLogger(), retrieverUrls, "?qkey=qval", func(ctx context.Context, u url.URL, s string) ([]*promapiv1.Alert, error) {
return retriever.Alerts(ctx, u, s)
})

@@ -198,7 +198,7 @@ func TestFanoutForward_AlertsTwoReturnSuccessWithOneOfTwoBrokenClients(t *testin
KeepFiringSince: nil,
},
}
alerts, err := fanoutForward(t.Context(), log.NewNopLogger(), retrieverUrls, "?qkey=qval", func(ctx context.Context, u url.URL, s string) ([]*promapiv1.Alert, error) {
alerts, err := fanoutForward(t.Context(), promslog.NewNopLogger(), retrieverUrls, "?qkey=qval", func(ctx context.Context, u url.URL, s string) ([]*promapiv1.Alert, error) {
return retriever.Alerts(ctx, u, s)
})

@@ -221,7 +221,7 @@ func TestFanoutForward_AlertsTwoReturnErrorIfAllClientsFail(t *testing.T) {
},
}
retriever := newClient(mockCli)
alerts, err := fanoutForward(t.Context(), log.NewNopLogger(), retrieverUrls, "?qkey=qval", func(ctx context.Context, u url.URL, s string) ([]*promapiv1.Alert, error) {
alerts, err := fanoutForward(t.Context(), promslog.NewNopLogger(), retrieverUrls, "?qkey=qval", func(ctx context.Context, u url.URL, s string) ([]*promapiv1.Alert, error) {
return retriever.Alerts(ctx, u, s)
})

@@ -293,7 +293,7 @@ func TestProxy_Alerts(t *testing.T) {
} {
t.Run(tt.name, func(t *testing.T) {
r := &Proxy{
logger: log.NewNopLogger(),
logger: promslog.NewNopLogger(),
endpoints: tt.ruleEvaluatorBaseURLs,
client: tt.ruleRetriever,
}
@@ -362,7 +362,7 @@ func TestProxy_RuleGroups(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
r := &Proxy{
logger: log.NewNopLogger(),
logger: promslog.NewNopLogger(),
endpoints: tt.ruleEvaluatorBaseURLs,
client: tt.ruleRetriever,
}